* [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 phy support
@ 2022-09-29 13:17 Mika Kahola
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus Mika Kahola
                   ` (7 more replies)
  0 siblings, 8 replies; 17+ messages in thread
From: Mika Kahola @ 2022-09-29 13:17 UTC (permalink / raw)
  To: intel-gfx

Add support for the C10 phy message bus and PHY/PLL programming,
along with updates for HDMI programming and the vswing tables.

Radhakrishna Sripada (5):
  drm/i915/mtl: Add Support for C10,C20 PHY Message Bus
  drm/i915/mtl: Add PLL programming support for C10 phy
  drm/i915/mtl: Add support for C10 phy programming
  drm/i915/mtl: Add C10 phy programming for HDMI
  drm/i915/mtl: Add vswing programming for C10 phys

 drivers/gpu/drm/i915/Makefile                 |    1 +
 drivers/gpu/drm/i915/display/intel_cx0_phy.c  | 1300 +++++++++++++++++
 drivers/gpu/drm/i915/display/intel_cx0_phy.h  |  155 ++
 drivers/gpu/drm/i915/display/intel_ddi.c      |   26 +-
 .../drm/i915/display/intel_ddi_buf_trans.c    |   36 +-
 .../drm/i915/display/intel_ddi_buf_trans.h    |    6 +
 drivers/gpu/drm/i915/display/intel_display.c  |    1 +
 .../drm/i915/display/intel_display_power.c    |    3 +-
 .../i915/display/intel_display_power_map.c    |    1 +
 .../i915/display/intel_display_power_well.c   |    2 +-
 .../drm/i915/display/intel_display_types.h    |    6 +
 drivers/gpu/drm/i915/display/intel_dp.c       |   15 +-
 drivers/gpu/drm/i915/display/intel_dpll.c     |   22 +-
 drivers/gpu/drm/i915/display/intel_dpll_mgr.c |    2 +-
 drivers/gpu/drm/i915/display/intel_hdmi.c     |    5 +-
 .../drm/i915/display/intel_modeset_verify.c   |    2 +
 drivers/gpu/drm/i915/i915_reg.h               |  142 ++
 17 files changed, 1716 insertions(+), 9 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/display/intel_cx0_phy.c
 create mode 100644 drivers/gpu/drm/i915/display/intel_cx0_phy.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 17+ messages in thread

* [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus
  2022-09-29 13:17 [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 phy support Mika Kahola
@ 2022-09-29 13:17 ` Mika Kahola
  2022-09-30  9:04   ` Jani Nikula
  2022-10-11  0:00   ` Lucas De Marchi
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 2/5] drm/i915/mtl: Add PLL programming support for C10 phy Mika Kahola
                   ` (6 subsequent siblings)
  7 siblings, 2 replies; 17+ messages in thread
From: Mika Kahola @ 2022-09-29 13:17 UTC (permalink / raw)
  To: intel-gfx

From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>

XELPDP has C10 and C20 phys from Synopsys to drive displays. Each phy
has a dedicated PIPE 5.2 message bus, which is used to configure the
phy internal registers.

Bspec: 64599, 65100, 65101, 67610, 67636
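
For illustration, a minimal sketch of how a caller is expected to use
the message bus accessors added below (intel_cx0_read/write/rmw and
their signatures are taken from this patch; the register address 0xd00
and the bit values are made-up placeholders, not real C10 register
definitions):

	u8 val;

	/* Read a PHY internal register over the PIPE 5.2 message bus. */
	val = intel_cx0_read(i915, port, lane, 0xd00);

	/* Committed write: waits for the write ACK from the PHY. */
	intel_cx0_write(i915, port, lane, 0xd00, val | BIT(0), true);

	/* Read-modify-write: clear bit 1, set nothing, committed. */
	intel_cx0_rmw(i915, port, lane, 0xd00, BIT(1), 0, true);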

Cc: Mika Kahola <mika.kahola@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
Signed-off-by: Mika Kahola <mika.kahola@intel.com> (v4)
---
 drivers/gpu/drm/i915/display/intel_cx0_phy.c | 179 +++++++++++++++++++
 1 file changed, 179 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/display/intel_cx0_phy.c

diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
new file mode 100644
index 000000000000..7930b0255cfa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "intel_de.h"
+#include "intel_uncore.h"
+
+static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
+{
+	enum phy phy = intel_port_to_phy(i915, port);
+
+	/* Bring the phy to idle. */
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+		       XELPDP_PORT_M2P_TRANSACTION_RESET);
+
+	/* Wait for Idle Clear. */
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+				    XELPDP_PORT_M2P_TRANSACTION_RESET,
+				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+		drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy));
+		return;
+	}
+
+	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);
+	return;
+}
+
+__maybe_unused static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
+			 int lane, u16 addr)
+{
+	enum phy phy = intel_port_to_phy(i915, port);
+	u32 val = 0;
+	int attempts = 0;
+
+retry:
+	if (attempts == 3) {
+		drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries. Status: 0x%x\n", phy_name(phy), addr, attempts, val ?: 0);
+		return 0;
+	}
+
+	/* Wait for pending transactions.*/
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
+				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	/* Issue the read command. */
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
+		       XELPDP_PORT_M2P_COMMAND_READ |
+		       XELPDP_PORT_M2P_ADDRESS(addr));
+
+	/* Wait for response ready and read the response. */
+	if (__intel_wait_for_register(&i915->uncore,
+				      XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+				      XELPDP_PORT_P2M_RESPONSE_READY,
+				      XELPDP_PORT_P2M_RESPONSE_READY,
+				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
+				      XELPDP_MSGBUS_TIMEOUT_SLOW, &val)) {
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Read response ACK. Status: 0x%x\n", phy_name(phy), val);
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	/* Check for error. */
+	if (val & XELPDP_PORT_P2M_ERROR_SET) {
+		drm_dbg(&i915->drm, "PHY %c Error occurred during read command. Status: 0x%x\n", phy_name(phy), val);
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	/* Check for Read Ack. */
+	if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val) !=
+	    XELPDP_PORT_P2M_COMMAND_READ_ACK) {
+		drm_dbg(&i915->drm, "PHY %c Not a Read response. MSGBUS Status: 0x%x.\n", phy_name(phy), val);
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	/* Clear Response Ready flag.*/
+	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);
+	return (u8)REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
+}
+
+static int intel_cx0_wait_cwrite_ack(struct drm_i915_private *i915,
+				      enum port port, int lane)
+{
+	enum phy phy = intel_port_to_phy(i915, port);
+	u32 val;
+
+	/* Check for write ack. */
+	if (__intel_wait_for_register(&i915->uncore,
+				      XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+				      XELPDP_PORT_P2M_RESPONSE_READY,
+				      XELPDP_PORT_P2M_RESPONSE_READY,
+				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
+				      XELPDP_MSGBUS_TIMEOUT_SLOW, &val)) {
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Committed message ACK. Status: 0x%x\n", phy_name(phy), val);
+		return -ETIMEDOUT;
+	}
+
+	if ((REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val) !=
+	     XELPDP_PORT_P2M_COMMAND_WRITE_ACK) || val & XELPDP_PORT_P2M_ERROR_SET) {
+		drm_dbg(&i915->drm, "PHY %c Unexpected ACK received. MSGBUS STATUS: 0x%x.\n", phy_name(phy), val);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+__maybe_unused static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
+			    int lane, u16 addr, u8 data, bool committed)
+{
+	enum phy phy = intel_port_to_phy(i915, port);
+	int attempts = 0;
+
+retry:
+	if (attempts == 3) {
+		drm_err_once(&i915->drm, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, attempts);
+		return;
+	}
+
+	/* Wait for pending transactions.*/
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
+				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	/* Issue the write command. */
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
+		       (committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
+		       XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
+		       XELPDP_PORT_M2P_DATA(data) |
+		       XELPDP_PORT_M2P_ADDRESS(addr));
+
+	/* Check for error. */
+	if (committed) {
+		if (intel_cx0_wait_cwrite_ack(i915, port, lane) < 0) {
+			attempts++;
+			intel_cx0_bus_reset(i915, port, lane);
+			goto retry;
+		}
+	} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) &
+			    XELPDP_PORT_P2M_ERROR_SET)) {
+		drm_dbg(&i915->drm, "PHY %c Error occurred during write command.\n", phy_name(phy));
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);
+
+	return;
+}
+
+__maybe_unused static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+			  int lane, u16 addr, u8 clear, u8 set, bool committed)
+{
+	u8 old, val;
+
+	old = intel_cx0_read(i915, port, lane, addr);
+	val = (old & ~clear) | set;
+
+	if (val != old)
+		intel_cx0_write(i915, port, lane, addr, val, committed);
+}
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [Intel-gfx] [PATCH 2/5] drm/i915/mtl: Add PLL programming support for C10 phy
  2022-09-29 13:17 [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 phy support Mika Kahola
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus Mika Kahola
@ 2022-09-29 13:17 ` Mika Kahola
  2022-09-30  9:19   ` Jani Nikula
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 3/5] drm/i915/mtl: Add support for C10 phy programming Mika Kahola
                   ` (5 subsequent siblings)
  7 siblings, 1 reply; 17+ messages in thread
From: Mika Kahola @ 2022-09-29 13:17 UTC (permalink / raw)
  To: intel-gfx

From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>

XELPDP has C10 phys to drive the eDP output and the native outputs
from the display engine. Add structures, PLL programming and hardware
state readout logic. Port clock calculations are similar to DG2: use
the DG2 formulae to calculate the port clock, but with the relevant
PLL signals. Note: PHY lane 0 is always used for PLL programming.

Bspec: 64568, 64539, 67636
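
As a sanity check, the port clock readout added below can be verified
by hand against the mtl_c10_dp_rbr table in this patch (38.4 MHz
refclk); the closed-form expression is just a rearrangement of the
integer math in intel_c10mpllb_calc_port_clock():

	multiplier = ((pll[3] & 0xf) << 8 | pll[2]) / 2 + 16 = 0x130 / 2 + 16 = 168
	frac_quot  = pll[12] << 8 | pll[11] = 0xc000 = 49152
	frac_rem   = 0, frac_den = 1, tx_clk_div = pll[15] & 0x7 = 2

	port_clock = refclk * (multiplier + frac_quot / 2^16) / (10 * 2^tx_clk_div)
	           = 38400 * (168 + 0.75) / 40
	           = 162000 kHz (RBR)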

Cc: Mika Kahola <mika.kahola@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
---
 drivers/gpu/drm/i915/display/intel_cx0_phy.c  | 516 +++++++++++++++++-
 drivers/gpu/drm/i915/display/intel_cx0_phy.h  | 128 +++++
 drivers/gpu/drm/i915/display/intel_ddi.c      |  20 +-
 drivers/gpu/drm/i915/display/intel_display.c  |   1 +
 .../drm/i915/display/intel_display_power.c    |   3 +-
 .../i915/display/intel_display_power_well.c   |   2 +-
 .../drm/i915/display/intel_display_types.h    |   6 +
 drivers/gpu/drm/i915/display/intel_dpll.c     |  20 +-
 drivers/gpu/drm/i915/display/intel_dpll_mgr.c |   2 +-
 .../drm/i915/display/intel_modeset_verify.c   |   2 +
 10 files changed, 690 insertions(+), 10 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/display/intel_cx0_phy.h

diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 7930b0255cfa..2f401116d1d0 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -3,7 +3,11 @@
  * Copyright © 2021 Intel Corporation
  */
 
+#include "intel_cx0_phy.h"
 #include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_panel.h"
 #include "intel_uncore.h"
 
 static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
@@ -26,7 +30,7 @@ static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, i
 	return;
 }
 
-__maybe_unused static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
+static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
 			 int lane, u16 addr)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
@@ -116,8 +120,8 @@ static int intel_cx0_wait_cwrite_ack(struct drm_i915_private *i915,
 	return 0;
 }
 
-__maybe_unused static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
-			    int lane, u16 addr, u8 data, bool committed)
+static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
+			      int lane, u16 addr, u8 data, bool committed)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
 	int attempts = 0;
@@ -166,8 +170,19 @@ __maybe_unused static void intel_cx0_write(struct drm_i915_private *i915, enum p
 	return;
 }
 
-__maybe_unused static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
-			  int lane, u16 addr, u8 clear, u8 set, bool committed)
+static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
+			    int lane, u16 addr, u8 data, bool committed)
+{
+	if (lane == INTEL_CX0_BOTH_LANES) {
+		__intel_cx0_write(i915, port, INTEL_CX0_LANE0, addr, data, committed);
+		__intel_cx0_write(i915, port, INTEL_CX0_LANE1, addr, data, committed);
+	} else {
+		__intel_cx0_write(i915, port, lane, addr, data, committed);
+	}
+}
+
+static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+			    int lane, u16 addr, u8 clear, u8 set, bool committed)
 {
 	u8 old, val;
 
@@ -177,3 +192,494 @@ __maybe_unused static void intel_cx0_rmw(struct drm_i915_private *i915, enum por
 	if (val != old)
 		intel_cx0_write(i915, port, lane, addr, val, committed);
 }
+
+static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+			  int lane, u16 addr, u8 clear, u8 set, bool committed)
+{
+	if (lane == INTEL_CX0_BOTH_LANES) {
+		__intel_cx0_rmw(i915, port, INTEL_CX0_LANE0, addr, clear, set, committed);
+		__intel_cx0_rmw(i915, port, INTEL_CX0_LANE1, addr, clear, set, committed);
+	} else {
+		__intel_cx0_rmw(i915, port, lane, addr, clear, set, committed);
+	}
+}
+
+/*
+ * Basic DP link rates with 38.4 MHz reference clock.
+ * Note: The tables below are with SSC. In the non-SSC case,
+ * registers 0xC04 to 0xC08 (pll[4] to pll[8]) will be
+ * programmed to 0.
+ */
+
+static const struct intel_c10mpllb_state mtl_c10_dp_rbr = {
+	.clock = 162000,
+	.pll[0] = 0xB4,
+	.pll[1] = 0,
+	.pll[2] = 0x30,
+	.pll[3] = 0x1,
+	.pll[4] = 0x26,
+	.pll[5] = 0x0C,
+	.pll[6] = 0x98,
+	.pll[7] = 0x46,
+	.pll[8] = 0x1,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0xC0,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0x2,
+	.pll[16] = 0x84,
+	.pll[17] = 0x4F,
+	.pll[18] = 0xE5,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_edp_r216 = {
+	.clock = 216000,
+	.pll[0] = 0x4,
+	.pll[1] = 0,
+	.pll[2] = 0xA2,
+	.pll[3] = 0x1,
+	.pll[4] = 0x33,
+	.pll[5] = 0x10,
+	.pll[6] = 0x75,
+	.pll[7] = 0xB3,
+	.pll[8] = 0x1,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0x2,
+	.pll[16] = 0x85,
+	.pll[17] = 0x0F,
+	.pll[18] = 0xE6,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_edp_r243 = {
+	.clock = 243000,
+	.pll[0] = 0x34,
+	.pll[1] = 0,
+	.pll[2] = 0xDA,
+	.pll[3] = 0x1,
+	.pll[4] = 0x39,
+	.pll[5] = 0x12,
+	.pll[6] = 0xE3,
+	.pll[7] = 0xE9,
+	.pll[8] = 0x1,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0x20,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0x2,
+	.pll[16] = 0x85,
+	.pll[17] = 0x8F,
+	.pll[18] = 0xE6,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_dp_hbr1 = {
+	.clock = 270000,
+	.pll[0] = 0xF4,
+	.pll[1] = 0,
+	.pll[2] = 0xF8,
+	.pll[3] = 0x0,
+	.pll[4] = 0x20,
+	.pll[5] = 0x0A,
+	.pll[6] = 0x29,
+	.pll[7] = 0x10,
+	.pll[8] = 0x1,   /* Verify */
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0xA0,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0x1,
+	.pll[16] = 0x84,
+	.pll[17] = 0x4F,
+	.pll[18] = 0xE5,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_edp_r324 = {
+	.clock = 324000,
+	.pll[0] = 0xB4,
+	.pll[1] = 0,
+	.pll[2] = 0x30,
+	.pll[3] = 0x1,
+	.pll[4] = 0x26,
+	.pll[5] = 0x0C,
+	.pll[6] = 0x98,
+	.pll[7] = 0x46,
+	.pll[8] = 0x1,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0xC0,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0x1,
+	.pll[16] = 0x85,
+	.pll[17] = 0x4F,
+	.pll[18] = 0xE6,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_edp_r432 = {
+	.clock = 432000,
+	.pll[0] = 0x4,
+	.pll[1] = 0,
+	.pll[2] = 0xA2,
+	.pll[3] = 0x1,
+	.pll[4] = 0x33,
+	.pll[5] = 0x10,
+	.pll[6] = 0x75,
+	.pll[7] = 0xB3,
+	.pll[8] = 0x1,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0x1,
+	.pll[16] = 0x85,
+	.pll[17] = 0x0F,
+	.pll[18] = 0xE6,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_dp_hbr2 = {
+	.clock = 540000,
+	.pll[0] = 0xF4,
+	.pll[1] = 0,
+	.pll[2] = 0xF8,
+	.pll[3] = 0,
+	.pll[4] = 0x20,
+	.pll[5] = 0x0A,
+	.pll[6] = 0x29,
+	.pll[7] = 0x10,
+	.pll[8] = 0x1,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0xA0,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0,
+	.pll[16] = 0x84,
+	.pll[17] = 0x4F,
+	.pll[18] = 0xE5,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_edp_r675 = {
+	.clock = 675000,
+	.pll[0] = 0xB4,
+	.pll[1] = 0,
+	.pll[2] = 0x3E,
+	.pll[3] = 0x1,
+	.pll[4] = 0xA8,
+	.pll[5] = 0x0C,
+	.pll[6] = 0x33,
+	.pll[7] = 0x54,
+	.pll[8] = 0x1,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0xC8,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0,
+	.pll[16] = 0x85,
+	.pll[17] = 0x8F,
+	.pll[18] = 0xE6,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_dp_hbr3 = {
+	.clock = 810000,
+	.pll[0] = 0x34,
+	.pll[1] = 0,
+	.pll[2] = 0x84,
+	.pll[3] = 0x1,
+	.pll[4] = 0x30,
+	.pll[5] = 0x0F,
+	.pll[6] = 0x3D,
+	.pll[7] = 0x98,
+	.pll[8] = 0x1,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0xF0,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0,
+	.pll[16] = 0x84,
+	.pll[17] = 0x0F,
+	.pll[18] = 0xE5,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state * const mtl_c10_dp_tables[] = {
+	&mtl_c10_dp_rbr,
+	&mtl_c10_dp_hbr1,
+	&mtl_c10_dp_hbr2,
+	&mtl_c10_dp_hbr3,
+	NULL,
+};
+
+static const struct intel_c10mpllb_state * const mtl_c10_edp_tables[] = {
+	&mtl_c10_dp_rbr,
+	&mtl_c10_edp_r216,
+	&mtl_c10_edp_r243,
+	&mtl_c10_dp_hbr1,
+	&mtl_c10_edp_r324,
+	&mtl_c10_edp_r432,
+	&mtl_c10_dp_hbr2,
+	&mtl_c10_edp_r675,
+	&mtl_c10_dp_hbr3,
+	NULL,
+};
+
+static const struct intel_c10mpllb_state * const *
+intel_c10_mpllb_tables_get(struct intel_crtc_state *crtc_state,
+			   struct intel_encoder *encoder)
+{
+	if (intel_crtc_has_dp_encoder(crtc_state)) {
+		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+			return mtl_c10_edp_tables;
+		else
+			return mtl_c10_dp_tables;
+	}
+
+	/* TODO: Add HDMI Support */
+	MISSING_CASE(encoder->type);
+	return NULL;
+}
+
+static int intel_c10mpllb_calc_state(struct intel_crtc_state *crtc_state,
+				     struct intel_encoder *encoder)
+{
+	const struct intel_c10mpllb_state * const *tables;
+	int i;
+
+	tables = intel_c10_mpllb_tables_get(crtc_state, encoder);
+	if (!tables)
+		return -EINVAL;
+
+	for (i = 0; tables[i]; i++) {
+		if (crtc_state->port_clock <= tables[i]->clock) {
+			crtc_state->c10mpllb_state = *tables[i];
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+int intel_cx0mpllb_calc_state(struct intel_crtc_state *crtc_state,
+			      struct intel_encoder *encoder)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
+
+	return intel_c10mpllb_calc_state(crtc_state, encoder);
+}
+
+void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
+				     struct intel_c10mpllb_state *pll_state)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+	enum intel_cx0_lanes lane = lane_reversal ? INTEL_CX0_LANE1 :
+				    INTEL_CX0_LANE0;
+	enum phy phy = intel_port_to_phy(i915, encoder->port);
+	int i;
+	u8 cmn, tx0;
+
+	/*
+	 * According to C10 VDR Register programming Sequence we need
+	 * to do this to read PHY internal registers from MsgBus.
+	 */
+	intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1), 0,
+		      C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
+
+	for (i = 0; i < 20; i++)
+		pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane,
+						   PHY_C10_VDR_PLL(i));
+
+	cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0));
+	tx0 = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0));
+
+	if (tx0 != C10_TX0_VAL || cmn != C10_CMN0_DP_VAL)
+		drm_warn(&i915->drm, "Unexpected tx: %x or cmn: %x for phy: %c.\n",
+			 tx0, cmn, phy_name(phy));
+}
+
+__maybe_unused static void intel_c10_pll_program(struct drm_i915_private *i915,
+						 const struct intel_crtc_state *crtc_state,
+						 struct intel_encoder *encoder)
+{
+	const struct intel_c10mpllb_state *pll_state = &crtc_state->c10mpllb_state;
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+	enum intel_cx0_lanes master_lane = lane_reversal ? INTEL_CX0_LANE1 :
+				 INTEL_CX0_LANE0;
+	enum intel_cx0_lanes follower_lane = lane_reversal ? INTEL_CX0_LANE0 :
+				 INTEL_CX0_LANE1;
+
+	int i;
+	struct intel_dp *intel_dp;
+	bool use_ssc = false;
+	u8 cmn0 = 0;
+
+	if (intel_crtc_has_dp_encoder(crtc_state)) {
+		intel_dp = enc_to_intel_dp(encoder);
+		use_ssc = (intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+			  DP_MAX_DOWNSPREAD_0_5);
+
+		if (intel_dp_is_edp(intel_dp) && !intel_panel_use_ssc(i915))
+			use_ssc = false;
+
+		cmn0 = C10_CMN0_DP_VAL;
+	}
+
+	intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+		        C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
+	/* Custom width needs to be programmed to 0 for both the phy lanes */
+	intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES,
+		      PHY_C10_VDR_CUSTOM_WIDTH, 0x3, 0, MB_WRITE_COMMITTED);
+	intel_cx0_rmw(i915, encoder->port, follower_lane, PHY_C10_VDR_CONTROL(1),
+		      C10_VDR_CTRL_MASTER_LANE, C10_VDR_CTRL_UPDATE_CFG,
+		      MB_WRITE_COMMITTED);
+
+	/* Program the pll values only for the master lane */
+	for (i = 0; i < 20; i++)
+		/* If not using SSC, pll[4] through pll[8] must be 0. */
+		intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_PLL(i),
+				(!use_ssc && (i > 3 && i < 9)) ? 0 : pll_state->pll[i],
+				(i % 4) ? MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED);
+
+	intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_CMN(0), cmn0, MB_WRITE_COMMITTED);
+	intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_TX(0), C10_TX0_VAL, MB_WRITE_COMMITTED);
+	intel_cx0_rmw(i915, encoder->port, master_lane, PHY_C10_VDR_CONTROL(1),
+		      C10_VDR_CTRL_MSGBUS_ACCESS, C10_VDR_CTRL_MASTER_LANE |
+		      C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
+}
+
+void intel_c10mpllb_dump_hw_state(struct drm_i915_private *dev_priv,
+				  const struct intel_c10mpllb_state *hw_state)
+{
+	bool fracen;
+	int i;
+	unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
+	unsigned int multiplier, tx_clk_div;
+
+	fracen = hw_state->pll[0] & C10_PLL0_FRACEN;
+	drm_dbg_kms(&dev_priv->drm, "c10pll_hw_state: fracen: %s, ",
+		    str_yes_no(fracen));
+
+	if (fracen) {
+		frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11];
+		frac_rem =  hw_state->pll[14] << 8 | hw_state->pll[13];
+		frac_den =  hw_state->pll[10] << 8 | hw_state->pll[9];
+		drm_dbg_kms(&dev_priv->drm, "quot: %u, rem: %u, den: %u,\n",
+			    frac_quot, frac_rem, frac_den);
+	}
+
+	multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, hw_state->pll[3]) << 8 |
+		      hw_state->pll[2]) / 2 + 16;
+	tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, hw_state->pll[15]);
+	drm_dbg_kms(&dev_priv->drm,
+		    "multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div);
+
+	drm_dbg_kms(&dev_priv->drm, "c10pll_rawhw_state:");
+
+	for (i = 0; i < 20; i = i + 4)
+		drm_dbg_kms(&dev_priv->drm, "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n",
+			    i, hw_state->pll[i], i + 1, hw_state->pll[i + 1],
+			    i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]);
+}
+
+int intel_c10mpllb_calc_port_clock(struct intel_encoder *encoder,
+				   const struct intel_c10mpllb_state *pll_state)
+{
+	unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
+	unsigned int multiplier, tx_clk_div, refclk = 38400;
+
+	if (pll_state->pll[0] & C10_PLL0_FRACEN) {
+		frac_quot = pll_state->pll[12] << 8 | pll_state->pll[11];
+		frac_rem =  pll_state->pll[14] << 8 | pll_state->pll[13];
+		frac_den =  pll_state->pll[10] << 8 | pll_state->pll[9];
+	}
+
+	multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, pll_state->pll[3]) << 8 |
+		      pll_state->pll[2]) / 2 + 16;
+
+	tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, pll_state->pll[15]);
+
+	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
+				     DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den),
+				     10 << (tx_clk_div + 16));
+}
+
+void intel_c10mpllb_state_verify(struct intel_atomic_state *state,
+				 struct intel_crtc_state *new_crtc_state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct intel_c10mpllb_state mpllb_hw_state = { 0 };
+	struct intel_c10mpllb_state *mpllb_sw_state = &new_crtc_state->c10mpllb_state;
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+	struct intel_encoder *encoder;
+	struct intel_dp *intel_dp;
+	enum phy phy;
+	int i;
+	bool use_ssc = false;
+
+	if (DISPLAY_VER(i915) < 14)
+		return;
+
+	if (!new_crtc_state->hw.active)
+		return;
+
+	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
+	phy = intel_port_to_phy(i915, encoder->port);
+
+	if (intel_crtc_has_dp_encoder(new_crtc_state)) {
+		intel_dp = enc_to_intel_dp(encoder);
+		use_ssc = (intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+			  DP_MAX_DOWNSPREAD_0_5);
+
+		if (intel_dp_is_edp(intel_dp) && !intel_panel_use_ssc(i915))
+			use_ssc = false;
+	}
+
+	if (!intel_is_c10phy(i915, phy))
+		return;
+
+	intel_c10mpllb_readout_hw_state(encoder, &mpllb_hw_state);
+
+	for (i = 0; i < 20; i++) {
+		u8 expected;
+
+		if (!use_ssc && i > 3 && i < 9)
+			expected = 0;
+		else
+			expected = mpllb_sw_state->pll[i];
+
+		I915_STATE_WARN(mpllb_hw_state.pll[i] != expected,
+				"[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
+				crtc->base.base.id, crtc->base.name,
+				i, expected, mpllb_hw_state.pll[i]);
+	}
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
new file mode 100644
index 000000000000..cf1f300b6a7b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_CX0_PHY_H__
+#define __INTEL_CX0_PHY_H__
+
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+
+/**
+ * REG_BIT8() - Prepare a u8 bit value
+ * @__n: 0-based bit number
+ *
+ * Local wrapper for BIT() to force u8, with compile time checks.
+ *
+ * @return: Value with bit @__n set.
+ */
+#define REG_BIT8(__n)							\
+	((u8)(BIT(__n) +						\
+	       BUILD_BUG_ON_ZERO(__is_constexpr(__n) &&		\
+				 ((__n) < 0 || (__n) > 7))))
+
+/**
+ * REG_GENMASK8() - Prepare a continuous u8 bitmask
+ * @__high: 0-based high bit
+ * @__low: 0-based low bit
+ *
+ * Local wrapper for GENMASK() to force u8, with compile time checks.
+ *
+ * @return: Continuous bitmask from @__high to @__low, inclusive.
+ */
+#define REG_GENMASK8(__high, __low)					\
+	((u8)(GENMASK(__high, __low) +					\
+	       BUILD_BUG_ON_ZERO(__is_constexpr(__high) &&	\
+				 __is_constexpr(__low) &&		\
+				 ((__low) < 0 || (__high) > 7 || (__low) > (__high)))))
+
+/*
+ * Local integer constant expression version of is_power_of_2().
+ */
+#define IS_POWER_OF_2(__x)		((__x) && (((__x) & ((__x) - 1)) == 0))
+
+/**
+ * REG_FIELD_PREP8() - Prepare a u8 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP8() to generate an integer constant expression, force
+ * u8 and for consistency with REG_FIELD_GET8(), REG_BIT8() and REG_GENMASK8().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP8(__mask, __val)						\
+	((u8)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) +	\
+	       BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) +		\
+	       BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U8_MAX) +		\
+	       BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+	       BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_GET8() - Extract a u8 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u8 and for consistency with
+ * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET8(__mask, __val)	((u8)FIELD_GET(__mask, __val))
+
+struct drm_i915_private;
+struct intel_encoder;
+struct intel_crtc_state;
+enum phy;
+
+enum intel_cx0_lanes {
+	INTEL_CX0_LANE0,
+	INTEL_CX0_LANE1,
+	INTEL_CX0_BOTH_LANES,
+};
+
+#define MB_WRITE_COMMITTED		1
+#define MB_WRITE_UNCOMMITTED		0
+
+/* C10 Vendor Registers */
+#define PHY_C10_VDR_PLL(idx)		(0xC00 + (idx))
+#define  C10_PLL0_FRACEN		REG_BIT8(4)
+#define  C10_PLL3_MULTIPLIERH_MASK	REG_GENMASK8(3, 0)
+#define  C10_PLL15_TXCLKDIV_MASK	REG_GENMASK8(2, 0)
+#define PHY_C10_VDR_CMN(idx)		(0xC20 + (idx))
+#define  C10_CMN0_DP_VAL		0x21
+#define  C10_CMN3_TXVBOOST_MASK		REG_GENMASK8(7, 5)
+#define  C10_CMN3_TXVBOOST(val)		REG_FIELD_PREP8(C10_CMN3_TXVBOOST_MASK, val)
+#define PHY_C10_VDR_TX(idx)		(0xC30 + (idx))
+#define  C10_TX0_VAL			0x10
+#define PHY_C10_VDR_CONTROL(idx)	(0xC70 + (idx) - 1)
+#define  C10_VDR_CTRL_MSGBUS_ACCESS	REG_BIT8(2)
+#define  C10_VDR_CTRL_MASTER_LANE	REG_BIT8(1)
+#define  C10_VDR_CTRL_UPDATE_CFG	REG_BIT8(0)
+#define PHY_C10_VDR_CUSTOM_WIDTH	0xD02
+
+static inline bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy)
+{
+	if (!IS_METEORLAKE(dev_priv))
+		return false;
+	else
+		return (phy < PHY_C);
+}
+
+void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
+				     struct intel_c10mpllb_state *pll_state);
+int intel_cx0mpllb_calc_state(struct intel_crtc_state *crtc_state,
+			      struct intel_encoder *encoder);
+void intel_c10mpllb_dump_hw_state(struct drm_i915_private *dev_priv,
+				  const struct intel_c10mpllb_state *hw_state);
+int intel_c10mpllb_calc_port_clock(struct intel_encoder *encoder,
+				   const struct intel_c10mpllb_state *pll_state);
+void intel_c10mpllb_state_verify(struct intel_atomic_state *state,
+				 struct intel_crtc_state *new_crtc_state);
+
+#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 971356237eca..aaa8846c3b18 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -38,6 +38,7 @@
 #include "intel_combo_phy_regs.h"
 #include "intel_connector.h"
 #include "intel_crtc.h"
+#include "intel_cx0_phy.h"
 #include "intel_ddi.h"
 #include "intel_ddi_buf_trans.h"
 #include "intel_de.h"
@@ -3487,6 +3488,21 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
 						     &crtc_state->dpll_hw_state);
 }
 
+static void mtl_ddi_get_config(struct intel_encoder *encoder,
+			       struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
+
+	intel_c10mpllb_readout_hw_state(encoder, &crtc_state->c10mpllb_state);
+	intel_c10mpllb_dump_hw_state(i915, &crtc_state->c10mpllb_state);
+	crtc_state->port_clock = intel_c10mpllb_calc_port_clock(encoder, &crtc_state->c10mpllb_state);
+
+	intel_ddi_get_config(encoder, crtc_state);
+}
+
 static void dg2_ddi_get_config(struct intel_encoder *encoder,
 				struct intel_crtc_state *crtc_state)
 {
@@ -4367,7 +4383,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	encoder->cloneable = 0;
 	encoder->pipe_mask = ~0;
 
-	if (IS_DG2(dev_priv)) {
+	if (DISPLAY_VER(dev_priv) >= 14) {
+		encoder->get_config = mtl_ddi_get_config;
+	} else if (IS_DG2(dev_priv)) {
 		encoder->enable_clock = intel_mpllb_enable;
 		encoder->disable_clock = intel_mpllb_disable;
 		encoder->get_config = dg2_ddi_get_config;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index eb8eaeb19881..5f9272f6e186 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -47,6 +47,7 @@
 
 #include "display/intel_audio.h"
 #include "display/intel_crt.h"
+#include "display/intel_cx0_phy.h"
 #include "display/intel_ddi.h"
 #include "display/intel_display_debugfs.h"
 #include "display/intel_display_power.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 1e608b9e5055..451c90b6d08d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1626,7 +1626,8 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
 		return;
 
 	/* 2. Initialize all combo phys */
-	intel_combo_phy_init(dev_priv);
+	if (DISPLAY_VER(dev_priv) < 14)
+		intel_combo_phy_init(dev_priv);
 
 	/*
 	 * 3. Enable Power Well 1 (PG1).
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index df7ee4969ef1..84e7f9d44ff9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -980,7 +980,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
 		bxt_verify_ddi_phy_power_wells(dev_priv);
 
-	if (DISPLAY_VER(dev_priv) >= 11)
+	if (DISPLAY_VER(dev_priv) >= 11 && DISPLAY_VER(dev_priv) < 14)
 		/*
 		 * DMC retains HW context only for port A, the other combo
 		 * PHY's HW context for port B is lost after DC transitions,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index e2b853e9e51d..be6ff6cdfb0b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -969,6 +969,11 @@ struct intel_mpllb_state {
 	u32 mpllb_sscstep;
 };
 
+struct intel_c10mpllb_state {
+	u32 clock; /* in KHz */
+	u8 pll[20];
+};
+
 struct intel_crtc_state {
 	/*
 	 * uapi (drm) state. This is the software state shown to userspace.
@@ -1108,6 +1113,7 @@ struct intel_crtc_state {
 	union {
 		struct intel_dpll_hw_state dpll_hw_state;
 		struct intel_mpllb_state mpllb_state;
+		struct intel_c10mpllb_state c10mpllb_state;
 	};
 
 	/*
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index b15ba78d64d6..73f541050913 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -7,6 +7,7 @@
 #include <linux/string_helpers.h>
 
 #include "intel_crtc.h"
+#include "intel_cx0_phy.h"
 #include "intel_de.h"
 #include "intel_display.h"
 #include "intel_display_types.h"
@@ -993,6 +994,17 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
 	return 0;
 }
 
+static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
+				  struct intel_crtc *crtc)
+{
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct intel_encoder *encoder =
+		intel_get_crtc_new_encoder(state, crtc_state);
+
+	return intel_cx0mpllb_calc_state(crtc_state, encoder);
+}
+
 static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
 {
 	return dpll->m < factor * dpll->n;
@@ -1421,6 +1433,10 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
 	return 0;
 }
 
+static const struct intel_dpll_funcs mtl_dpll_funcs = {
+	.crtc_compute_clock = mtl_crtc_compute_clock,
+};
+
 static const struct intel_dpll_funcs dg2_dpll_funcs = {
 	.crtc_compute_clock = dg2_crtc_compute_clock,
 };
@@ -1515,7 +1531,9 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
 void
 intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
 {
-	if (IS_DG2(dev_priv))
+	if (DISPLAY_VER(dev_priv) >= 14)
+		dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
+	else if (IS_DG2(dev_priv))
 		dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
 	else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
 		dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index b63600d8ebeb..a3d015f44eed 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -4173,7 +4173,7 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
 
 	mutex_init(&dev_priv->display.dpll.lock);
 
-	if (IS_DG2(dev_priv))
+	if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv))
 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
 		dpll_mgr = NULL;
 	else if (IS_ALDERLAKE_P(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index 0fdcf2e6d57f..dfd9a0108b0f 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -11,6 +11,7 @@
 #include "intel_atomic.h"
 #include "intel_crtc.h"
 #include "intel_crtc_state_dump.h"
+#include "intel_cx0_phy.h"
 #include "intel_display.h"
 #include "intel_display_types.h"
 #include "intel_fdi.h"
@@ -235,6 +236,7 @@ void intel_modeset_verify_crtc(struct intel_crtc *crtc,
 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
 	intel_shared_dpll_state_verify(crtc, old_crtc_state, new_crtc_state);
 	intel_mpllb_state_verify(state, new_crtc_state);
+	intel_c10mpllb_state_verify(state, new_crtc_state);
 }
 
 void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [Intel-gfx] [PATCH 3/5] drm/i915/mtl: Add support for C10 phy programming
  2022-09-29 13:17 [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 phy support Mika Kahola
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus Mika Kahola
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 2/5] drm/i915/mtl: Add PLL programming support for C10 phy Mika Kahola
@ 2022-09-29 13:17 ` Mika Kahola
  2022-09-30  9:32   ` Jani Nikula
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 4/5] drm/i915/mtl: Add C10 phy programming for HDMI Mika Kahola
                   ` (4 subsequent siblings)
  7 siblings, 1 reply; 17+ messages in thread
From: Mika Kahola @ 2022-09-29 13:17 UTC (permalink / raw)
  To: intel-gfx

From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>

Add sequences for C10 phy enable/disable: phy lane reset, powerdown
change sequence and phy lane programming.

Bspec: 64539, 67636, 65451, 65450, 64568
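
For orientation, the enable path added below reduces to the following
steps (condensed from intel_c10pll_enable() in this patch; the timeout
and error handling is dropped):

	/* 1. Clock muxes, gating and SSC via PORT_CLOCK_CTL. */
	intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);

	/* 2. Bring the PHY lanes out of reset. */
	intel_cx0_phy_lane_reset(i915, encoder->port, lane_reversal);

	/* 3. Powerdown state change to Ready. */
	intel_cx0_powerdown_change_sequence(i915, encoder->port,
					    INTEL_CX0_BOTH_LANES, CX0_P2_STATE_READY);

	/* 4. Program the PHY internal PLL registers over the message bus. */
	intel_c10_pll_program(i915, crtc_state, encoder);

	/* 5. Enable/disable the owned PHY lane transmitters. */
	intel_c10_program_phy_lane(i915, encoder->port,
				   crtc_state->lane_count, lane_reversal);

	/* 6.-10. DVFS sequence, DDI_CLK_VALFREQ and the PCLK PLL
	 * request/ack handshake follow, as in the patch below.
	 */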

Cc: Imre Deak <imre.deak@intel.com>
Cc: Mika Kahola <mika.kahola@intel.com>
Cc: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
Signed-off-by: Mika Kahola <mika.kahola@intel.com> (v9)
---
 drivers/gpu/drm/i915/Makefile                |   1 +
 drivers/gpu/drm/i915/display/intel_cx0_phy.c | 352 ++++++++++++++++++-
 drivers/gpu/drm/i915/display/intel_cx0_phy.h |  17 +
 drivers/gpu/drm/i915/display/intel_ddi.c     |   2 +
 drivers/gpu/drm/i915/display/intel_dp.c      |  15 +-
 drivers/gpu/drm/i915/display/intel_dpll.c    |   2 +
 drivers/gpu/drm/i915/i915_reg.h              | 141 ++++++++
 7 files changed, 526 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a26edcdadc21..994f87a12782 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -279,6 +279,7 @@ i915-y += \
 	display/icl_dsi.o \
 	display/intel_backlight.o \
 	display/intel_crt.o \
+	display/intel_cx0_phy.o \
 	display/intel_ddi.o \
 	display/intel_ddi_buf_trans.o \
 	display/intel_display_trace.o \
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 2f401116d1d0..6ba11cd7cd75 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -526,9 +526,9 @@ void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
 			 tx0, cmn, phy_name(phy));
 }
 
-__maybe_unused static void intel_c10_pll_program(struct drm_i915_private *i915,
-						 const struct intel_crtc_state *crtc_state,
-						 struct intel_encoder *encoder)
+static void intel_c10_pll_program(struct drm_i915_private *i915,
+				  const struct intel_crtc_state *crtc_state,
+				  struct intel_encoder *encoder)
 {
 	const struct intel_c10mpllb_state *pll_state = &crtc_state->c10mpllb_state;
 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -633,6 +633,352 @@ int intel_c10mpllb_calc_port_clock(struct intel_encoder *encoder,
 				     10 << (tx_clk_div + 16));
 }
 
+#define PHY_LANES_VAL_ARG(FIELD, lanes, arg)	({u32 __val; switch(lanes) {\
+						  case INTEL_CX0_BOTH_LANES:	\
+							__val = ((XELPDP_LANE0_##FIELD(arg)) |\
+							        (XELPDP_LANE1_##FIELD(arg))); \
+							break;				\
+						  case INTEL_CX0_LANE0:         \
+							__val = (XELPDP_LANE0_##FIELD(arg));\
+							break;				\
+						  case INTEL_CX0_LANE1:         \
+							__val = (XELPDP_LANE1_##FIELD(arg));\
+							break;  \
+						 }; __val; })
+
+#define PHY_LANES_VAL(FIELD, lanes)	({u32 __val; switch(lanes) {\
+						  case INTEL_CX0_BOTH_LANES:	\
+							__val = (XELPDP_LANE0_##FIELD | \
+							        XELPDP_LANE1_##FIELD); \
+							break;				\
+						  case INTEL_CX0_LANE0:         \
+							__val = (XELPDP_LANE0_##FIELD);	     \
+							break;				\
+						  case INTEL_CX0_LANE1:         \
+							__val = (XELPDP_LANE1_##FIELD);\
+							break;  \
+						 }; __val; })
+
+static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
+					 const struct intel_crtc_state *crtc_state,
+					 bool lane_reversal)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_dp *intel_dp;
+	bool ssc_enabled;
+	u32 val = 0;
+
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_REVERSAL,
+		     lane_reversal ? XELPDP_PORT_REVERSAL : 0);
+
+	if (lane_reversal)
+		val |= XELPDP_LANE1_PHY_CLOCK_SELECT;
+
+	val |= XELPDP_FORWARD_CLOCK_UNGATE;
+	val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+
+	if (intel_crtc_has_dp_encoder(crtc_state)) {
+		intel_dp = enc_to_intel_dp(encoder);
+		ssc_enabled = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+			      DP_MAX_DOWNSPREAD_0_5;
+
+		/* TODO: DP2.0 10G and 20G rates enable MPLLA*/
+		val |= ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
+	}
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+		     XELPDP_LANE1_PHY_CLOCK_SELECT |
+		     XELPDP_FORWARD_CLOCK_UNGATE |
+		     XELPDP_DDI_CLOCK_SELECT_MASK |
+		     XELPDP_SSC_ENABLE_PLLB, val);
+}
+
+static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
+						enum port port,
+						enum intel_cx0_lanes lane, u8 state)
+{
+	enum phy phy = intel_port_to_phy(i915, port);
+
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+		     PHY_LANES_VAL(POWERDOWN_NEW_STATE_MASK, lane),
+		     PHY_LANES_VAL_ARG(POWERDOWN_NEW_STATE, lane, state));
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+		     PHY_LANES_VAL(POWERDOWN_UPDATE, lane),
+		     PHY_LANES_VAL(POWERDOWN_UPDATE, lane));
+
+	/* Update Timeout Value */
+	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_BUF_CTL2(port),
+				      PHY_LANES_VAL(POWERDOWN_UPDATE, lane), 0,
+				      XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
+		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
+			 phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
+}
+
+static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
+{
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+		     XELPDP_POWER_STATE_READY_MASK,
+		     XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(port),
+		     XELPDP_POWER_STATE_ACTIVE_MASK |
+		     XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
+		     XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
+		     XELPDP_PLL_LANE_STAGGERING_DELAY(0));
+}
+
+/* FIXME: Some Type-C cases need not reset both the lanes. Handle those cases. */
+static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, enum port port,
+				     bool lane_reversal)
+{
+	enum phy phy = intel_port_to_phy(i915, port);
+	enum intel_cx0_lanes lane = lane_reversal ? INTEL_CX0_LANE1 :
+				    INTEL_CX0_LANE0;
+
+	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_BUF_CTL1(port),
+				      XELPDP_PORT_BUF_SOC_PHY_READY,
+				      XELPDP_PORT_BUF_SOC_PHY_READY,
+				      XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
+		drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
+			 phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
+
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+		     PHY_LANES_VAL(PIPE_RESET, INTEL_CX0_BOTH_LANES),
+		     PHY_LANES_VAL(PIPE_RESET, INTEL_CX0_BOTH_LANES));
+
+	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_BUF_CTL2(port),
+				      PHY_LANES_VAL(PHY_CURRENT_STATUS, INTEL_CX0_BOTH_LANES),
+				      PHY_LANES_VAL(PHY_CURRENT_STATUS, INTEL_CX0_BOTH_LANES),
+				      XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
+		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
+			 phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
+
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
+		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST, lane),
+		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST, lane));
+
+	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_CLOCK_CTL(port),
+				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane),
+				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane),
+				      XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
+		drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
+			 phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
+
+	intel_cx0_powerdown_change_sequence(i915, port, INTEL_CX0_BOTH_LANES,
+					    CX0_P2_STATE_RESET);
+	intel_cx0_setup_powerdown(i915, port);
+
+	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+		     PHY_LANES_VAL(PIPE_RESET, INTEL_CX0_BOTH_LANES), 0);
+
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(port),
+				    PHY_LANES_VAL(PHY_CURRENT_STATUS,
+						  INTEL_CX0_BOTH_LANES),
+				    XELPDP_PORT_RESET_END_TIMEOUT))
+		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n",
+			 phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
+}
+
+static void intel_c10_program_phy_lane(struct drm_i915_private *i915,
+				       enum port port, int lane_count,
+				       bool lane_reversal)
+{
+	u8 l0t1, l0t2, l1t1, l1t2;
+
+	intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+		      C10_VDR_CTRL_MSGBUS_ACCESS, C10_VDR_CTRL_MSGBUS_ACCESS,
+		      MB_WRITE_COMMITTED);
+
+	l0t1 = intel_cx0_read(i915, port, 0, PHY_CX0_TX_CONTROL(1, 2));
+	l0t2 = intel_cx0_read(i915, port, 0, PHY_CX0_TX_CONTROL(2, 2));
+	l1t1 = intel_cx0_read(i915, port, 1, PHY_CX0_TX_CONTROL(1, 2));
+	l1t2 = intel_cx0_read(i915, port, 1, PHY_CX0_TX_CONTROL(2, 2));
+
+	if (lane_reversal) {
+		switch (lane_count) {
+		case 1:
+			/* Disable MLs 1(lane0), 2(lane0), 3(lane1) */
+			intel_cx0_write(i915, port, 1, PHY_CX0_TX_CONTROL(1, 2),
+					l1t1 | CONTROL2_DISABLE_SINGLE_TX,
+					MB_WRITE_COMMITTED);
+			fallthrough;
+		case 2:
+			/* Disable MLs 1(lane0), 2(lane0) */
+			intel_cx0_write(i915, port, 0, PHY_CX0_TX_CONTROL(2, 2),
+					l0t2 | CONTROL2_DISABLE_SINGLE_TX,
+					MB_WRITE_COMMITTED);
+			fallthrough;
+		case 3:
+			/* Disable MLs 1(lane0) */
+			intel_cx0_write(i915, port, 0, PHY_CX0_TX_CONTROL(1, 2),
+					l0t1 | CONTROL2_DISABLE_SINGLE_TX,
+					MB_WRITE_COMMITTED);
+			break;
+		}
+	} else {
+		switch (lane_count) {
+		case 1:
+			/* Disable MLs 2(lane0), 3(lane1), 4(lane1) */
+			intel_cx0_write(i915, port, 0, PHY_CX0_TX_CONTROL(2, 2),
+					l0t2 | CONTROL2_DISABLE_SINGLE_TX,
+					MB_WRITE_COMMITTED);
+			fallthrough;
+		case 2:
+			/* Disable MLs 3(lane1), 4(lane1) */
+			intel_cx0_write(i915, port, 1, PHY_CX0_TX_CONTROL(1, 2),
+					l1t1 | CONTROL2_DISABLE_SINGLE_TX,
+					MB_WRITE_COMMITTED);
+			fallthrough;
+		case 3:
+			/* Disable MLs 4(lane1) */
+			intel_cx0_write(i915, port, 1, PHY_CX0_TX_CONTROL(2, 2),
+					l1t2 | CONTROL2_DISABLE_SINGLE_TX,
+					MB_WRITE_COMMITTED);
+			break;
+		}
+	}
+
+	intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+		      C10_VDR_CTRL_UPDATE_CFG, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
+}
+
+static void intel_c10pll_enable(struct intel_encoder *encoder,
+				const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	enum phy phy = intel_port_to_phy(i915, encoder->port);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+	enum intel_cx0_lanes maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
+				    INTEL_CX0_LANE0;
+
+	/*
+	 * 1. Program PORT_CLOCK_CTL REGISTER to configure
+	 * clock muxes, gating and SSC
+	 */
+	intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
+
+	/* 2. Bring PHY out of reset. */
+	intel_cx0_phy_lane_reset(i915, encoder->port, lane_reversal);
+
+	/*
+	 * 3. Change Phy power state to Ready.
+	 * TODO: For DP alt mode use only one lane.
+	 */
+	intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
+					    CX0_P2_STATE_READY);
+
+	/* 4. Program PHY internal PLL internal registers. */
+	intel_c10_pll_program(i915, crtc_state, encoder);
+
+	/*
+	 * 5. Program the enabled and disabled owned PHY lane
+	 * transmitters over message bus
+	 */
+	intel_c10_program_phy_lane(i915, encoder->port, crtc_state->lane_count, lane_reversal);
+
+	/*
+	 * 6. Follow the Display Voltage Frequency Switching - Sequence
+	 * Before Frequency Change. We handle this step in bxt_set_cdclk().
+	 */
+
+	/*
+	 * 7. Program DDI_CLK_VALFREQ to match intended DDI
+	 * clock frequency.
+	 */
+	intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port),
+		       crtc_state->port_clock);
+	/*
+	 * 8. Set PORT_CLOCK_CTL register PCLK PLL Request
+	 * LN<Lane for maxPCLK> to "1" to enable PLL.
+	 */
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), 0,
+		     PHY_LANES_VAL(PCLK_PLL_REQUEST, maxpclk_lane));
+
+	/* 9. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
+	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_CLOCK_CTL(encoder->port),
+				      PHY_LANES_VAL(PCLK_PLL_ACK, maxpclk_lane),
+				      PHY_LANES_VAL(PCLK_PLL_ACK, maxpclk_lane),
+				      XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
+		drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n",
+			 phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
+
+	/*
+	 * 10. Follow the Display Voltage Frequency Switching Sequence After
+	 * Frequency Change. We handle this step in bxt_set_cdclk().
+	 */
+}
+
+void intel_cx0pll_enable(struct intel_encoder *encoder,
+			 const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
+	intel_c10pll_enable(encoder, crtc_state);
+}
+
+static void intel_c10pll_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	enum phy phy = intel_port_to_phy(i915, encoder->port);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+	enum intel_cx0_lanes lane = lane_reversal ? INTEL_CX0_LANE1 :
+				    INTEL_CX0_LANE0;
+
+	/* 1. Change owned PHY lane power to Disable state. */
+	intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
+					    CX0_P2PG_STATE_DISABLE);
+
+	/*
+	 * 2. Follow the Display Voltage Frequency Switching Sequence Before
+	 * Frequency Change. We handle this step in bxt_set_cdclk().
+	 */
+
+	/*
+	 * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK>
+	 * to "0" to disable PLL.
+	 */
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+		     PHY_LANES_VAL(PCLK_PLL_REQUEST, INTEL_CX0_BOTH_LANES) |
+		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST, INTEL_CX0_BOTH_LANES), 0);
+
+	/* 4. Program DDI_CLK_VALFREQ to 0. */
+	intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0);
+
+	/*
+	 * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
+	 */
+	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_CLOCK_CTL(encoder->port),
+				      PHY_LANES_VAL(PCLK_PLL_ACK, lane) |
+				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane), 0,
+				      XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
+		drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n",
+			 phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
+
+	/*
+	 * 6. Follow the Display Voltage Frequency Switching Sequence After
+	 * Frequency Change. We handle this step in bxt_set_cdclk().
+	 */
+
+	/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+		     XELPDP_DDI_CLOCK_SELECT_MASK |
+		     XELPDP_FORWARD_CLOCK_UNGATE, 0);
+}
+
+void intel_cx0pll_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
+	intel_c10pll_disable(encoder);
+}
+
+#undef PHY_LANES_VAL_ARG
+#undef PHY_LANES_VAL
+
 void intel_c10mpllb_state_verify(struct intel_atomic_state *state,
 				 struct intel_crtc_state *new_crtc_state)
 {
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index cf1f300b6a7b..d12d2e2f02ee 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -106,6 +106,19 @@ enum intel_cx0_lanes {
 #define  C10_VDR_CTRL_UPDATE_CFG	REG_BIT8(0)
 #define PHY_C10_VDR_CUSTOM_WIDTH	0xD02
 
+#define CX0_P0_STATE_ACTIVE		0x0
+#define CX0_P2_STATE_READY		0x2
+#define CX0_P2PG_STATE_DISABLE		0x9
+#define CX0_P4PG_STATE_DISABLE		0xC
+#define CX0_P2_STATE_RESET		0x2
+
+/* PHY_C10_VDR_PLL0 */
+#define PLL_C10_MPLL_SSC_EN		REG_BIT8(0)
+
+/* PIPE SPEC Defined Registers */
+#define PHY_CX0_TX_CONTROL(tx, control)	(0x400 + ((tx) - 1) * 0x200 + (control))
+#define CONTROL2_DISABLE_SINGLE_TX	REG_BIT(6)
+
 static inline bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy)
 {
 	if (!IS_METEORLAKE(dev_priv))
@@ -114,6 +127,10 @@ static inline bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy p
 		return (phy < PHY_C);
 }
 
+void intel_cx0pll_enable(struct intel_encoder *encoder,
+			 const struct intel_crtc_state *crtc_state);
+void intel_cx0pll_disable(struct intel_encoder *encoder);
+
 void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
 				     struct intel_c10mpllb_state *pll_state);
 int intel_cx0mpllb_calc_state(struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index aaa8846c3b18..639ec604babf 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -4384,6 +4384,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	encoder->pipe_mask = ~0;
 
 	if (DISPLAY_VER(dev_priv) >= 14) {
+		encoder->enable_clock = intel_cx0pll_enable;
+		encoder->disable_clock = intel_cx0pll_disable;
 		encoder->get_config = mtl_ddi_get_config;
 	} else if (IS_DG2(dev_priv)) {
 		encoder->enable_clock = intel_mpllb_enable;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 70b06806ec0d..db32799b5f46 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -420,6 +420,11 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp)
 	return 810000;
 }
 
+static int mtl_max_source_rate(struct intel_dp *intel_dp)
+{
+	return intel_dp_is_edp(intel_dp) ? 675000 : 810000;
+}
+
 static int vbt_max_link_rate(struct intel_dp *intel_dp)
 {
 	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
@@ -444,6 +449,10 @@ static void
 intel_dp_set_source_rates(struct intel_dp *intel_dp)
 {
 	/* The values must be in increasing order */
+	static const int mtl_rates[] = {
+		162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
+		810000,
+	};
 	static const int icl_rates[] = {
 		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
 		1000000, 1350000,
@@ -469,7 +478,11 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
 	drm_WARN_ON(&dev_priv->drm,
 		    intel_dp->source_rates || intel_dp->num_source_rates);
 
-	if (DISPLAY_VER(dev_priv) >= 11) {
+	if (DISPLAY_VER(dev_priv) >= 14) {
+		source_rates = mtl_rates;
+		size = ARRAY_SIZE(mtl_rates);
+		max_rate = mtl_max_source_rate(intel_dp);
+	} else if (DISPLAY_VER(dev_priv) >= 11) {
 		source_rates = icl_rates;
 		size = ARRAY_SIZE(icl_rates);
 		if (IS_DG2(dev_priv))
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5003a5ffbc6a..5e6ff9f2aa10 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2121,6 +2121,11 @@
 #define   TRANS_PUSH_EN			REG_BIT(31)
 #define   TRANS_PUSH_SEND		REG_BIT(30)
 
+/* DDI Buffer Control */
+#define _DDI_CLK_VALFREQ_A		0x64030
+#define _DDI_CLK_VALFREQ_B		0x64130
+#define DDI_CLK_VALFREQ(port)		_MMIO_PORT(port, _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B)
+
 /*
  * HSW+ eDP PSR registers
  *
@@ -8375,4 +8380,140 @@ enum skl_power_gate {
 
 #define MTL_MEDIA_GSI_BASE		0x380000
 
+#define PUNIT_MMIO_CR_POC_STRAPS	_MMIO(0x281078)
+#define   NUM_TILES_MASK		REG_GENMASK(1, 0)
+#define   CD_ALIVE			REG_BIT(2)
+#define   SOCKET_ID_MASK		REG_GENMASK(7, 3)
+
+/* Define the BAR and offset for the accelerator fabric CSRs */
+#define CD_BASE_OFFSET 0x291000
+#define CD_BAR_SIZE (256 * 1024)
+
+/*
+ * In general, the i915 should not touch the IAF registers.  The registers
+ * will be passed as an IO resource via the MFD interface.  However, it
+ * is necessary to put the IRQ bits in a known state, before the MFD cell
+ * is registered.
+ *
+ * So define these registers for i915 usage.
+ */
+#define CPORT_MBDB_CSRS (CD_BASE_OFFSET + 0x6000)
+#define CPORT_MBDB_CSRS_END (CPORT_MBDB_CSRS + 0x1000)
+#define CPORT_MBDB_INT_ENABLE_MASK _MMIO(CPORT_MBDB_CSRS + 0x8)
+
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A		0x64040
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B		0x64140
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1		0x16F240
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2		0x16F440
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC3		0x16F640
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC4		0x16F840
+#define _XELPDP_PORT_M2P_MSGBUS_CTL(port, lane)		(_PICK(port, \
+							[PORT_A] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
+							[PORT_B] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
+							[PORT_TC1] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
+							[PORT_TC2] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2, \
+							[PORT_TC3] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC3, \
+							[PORT_TC4] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC4) + ((lane) * 4))
+
+#define XELPDP_PORT_M2P_MSGBUS_CTL(port, lane)		_MMIO(_XELPDP_PORT_M2P_MSGBUS_CTL(port, lane))
+#define  XELPDP_PORT_M2P_TRANSACTION_PENDING		REG_BIT(31)
+#define  XELPDP_PORT_M2P_COMMAND_TYPE_MASK		REG_GENMASK(30, 27)
+#define  XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
+#define  XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2)
+#define  XELPDP_PORT_M2P_COMMAND_READ			REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3)
+#define  XELPDP_PORT_M2P_DATA_MASK			REG_GENMASK(23, 16)
+#define  XELPDP_PORT_M2P_DATA(val)			REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val)
+#define  XELPDP_PORT_M2P_TRANSACTION_RESET		REG_BIT(15)
+#define  XELPDP_PORT_M2P_ADDRESS_MASK			REG_GENMASK(11, 0)
+#define  XELPDP_PORT_M2P_ADDRESS(val)			REG_FIELD_PREP(XELPDP_PORT_M2P_ADDRESS_MASK, val)
+
+#define XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)	_MMIO(_XELPDP_PORT_M2P_MSGBUS_CTL(port, lane) + 8)
+#define  XELPDP_PORT_P2M_RESPONSE_READY			REG_BIT(31)
+#define  XELPDP_PORT_P2M_COMMAND_TYPE_MASK		REG_GENMASK(30, 27)
+#define  XELPDP_PORT_P2M_COMMAND_READ_ACK		0x4
+#define  XELPDP_PORT_P2M_COMMAND_WRITE_ACK		0x5
+#define  XELPDP_PORT_P2M_DATA_MASK			REG_GENMASK(23, 16)
+#define  XELPDP_PORT_P2M_DATA(val)			REG_FIELD_PREP(XELPDP_PORT_P2M_DATA_MASK, val)
+#define  XELPDP_PORT_P2M_ERROR_SET			REG_BIT(15)
+
+#define  XELPDP_MSGBUS_TIMEOUT_SLOW			1
+#define  XELPDP_MSGBUS_TIMEOUT_FAST_US			2
+#define XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US		3200
+#define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US		20
+#define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US		100
+#define XELPDP_PORT_RESET_START_TIMEOUT_US		5
+#define XELPDP_PORT_RESET_END_TIMEOUT			15
+#define XELPDP_REFCLK_ENABLE_TIMEOUT_US			1
+
+#define _XELPDP_PORT_BUF_CTL1_LN0_A			0x64004
+#define _XELPDP_PORT_BUF_CTL1_LN0_B			0x64104
+#define _XELPDP_PORT_BUF_CTL1_LN0_USBC1			0x16F200
+#define _XELPDP_PORT_BUF_CTL1_LN0_USBC2			0x16F400
+#define _XELPDP_PORT_BUF_CTL1_LN0_USBC3			0x16F600
+#define _XELPDP_PORT_BUF_CTL1_LN0_USBC4			0x16F800
+#define _XELPDP_PORT_BUF_CTL1(port)			(_PICK(port, \
+							[PORT_A] = _XELPDP_PORT_BUF_CTL1_LN0_A, \
+							[PORT_B] = _XELPDP_PORT_BUF_CTL1_LN0_B, \
+							[PORT_TC1] = _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
+							[PORT_TC2] = _XELPDP_PORT_BUF_CTL1_LN0_USBC2, \
+							[PORT_TC3] = _XELPDP_PORT_BUF_CTL1_LN0_USBC3, \
+							[PORT_TC4] = _XELPDP_PORT_BUF_CTL1_LN0_USBC4))
+
+#define XELPDP_PORT_BUF_CTL1(port)			_MMIO(_XELPDP_PORT_BUF_CTL1(port))
+#define  XELPDP_PORT_BUF_SOC_PHY_READY			REG_BIT(24)
+#define  XELPDP_PORT_REVERSAL				REG_BIT(16)
+#define  XELPDP_PORT_WIDTH_MASK				REG_GENMASK(3, 1)
+#define  XELPDP_PORT_WIDTH(val)				REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, val)
+
+#define XELPDP_PORT_BUF_CTL2(port)			_MMIO(_XELPDP_PORT_BUF_CTL1(port) + 4)
+#define  XELPDP_LANE0_PIPE_RESET			REG_BIT(31)
+#define  XELPDP_LANE1_PIPE_RESET			REG_BIT(30)
+#define  XELPDP_LANE0_PHY_CURRENT_STATUS		REG_BIT(29)
+#define  XELPDP_LANE1_PHY_CURRENT_STATUS		REG_BIT(28)
+#define  XELPDP_LANE0_POWERDOWN_UPDATE			REG_BIT(25)
+#define  XELPDP_LANE1_POWERDOWN_UPDATE			REG_BIT(24)
+#define  XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK		REG_GENMASK(23, 20)
+#define  XELPDP_LANE0_POWERDOWN_NEW_STATE(val)		REG_FIELD_PREP(XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK, val)
+#define  XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK		REG_GENMASK(19, 16)
+#define  XELPDP_LANE1_POWERDOWN_NEW_STATE(val)		REG_FIELD_PREP(XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK, val)
+#define  XELPDP_POWER_STATE_READY_MASK			REG_GENMASK(7, 4)
+#define  XELPDP_POWER_STATE_READY(val)			REG_FIELD_PREP(XELPDP_POWER_STATE_READY_MASK, val)
+
+#define XELPDP_PORT_BUF_CTL3(port)			_MMIO(_XELPDP_PORT_BUF_CTL1(port) + 8)
+#define  XELPDP_PLL_LANE_STAGGERING_DELAY_MASK		REG_GENMASK(15, 8)
+#define  XELPDP_PLL_LANE_STAGGERING_DELAY(val)		REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
+#define  XELPDP_POWER_STATE_ACTIVE_MASK			REG_GENMASK(3, 0)
+#define  XELPDP_POWER_STATE_ACTIVE(val)			REG_FIELD_PREP(XELPDP_POWER_STATE_ACTIVE_MASK, val)
+
+#define _XELPDP_PORT_CLOCK_CTL_A			0x640E0
+#define _XELPDP_PORT_CLOCK_CTL_B			0x641E0
+#define _XELPDP_PORT_CLOCK_CTL_USBC1			0x16F260
+#define _XELPDP_PORT_CLOCK_CTL_USBC2			0x16F460
+#define _XELPDP_PORT_CLOCK_CTL_USBC3			0x16F660
+#define _XELPDP_PORT_CLOCK_CTL_USBC4			0x16F860
+#define XELPDP_PORT_CLOCK_CTL(port)			_MMIO(_PICK(port, \
+							[PORT_A] = _XELPDP_PORT_CLOCK_CTL_A, \
+							[PORT_B] = _XELPDP_PORT_CLOCK_CTL_B, \
+							[PORT_TC1] = _XELPDP_PORT_CLOCK_CTL_USBC1, \
+							[PORT_TC2] = _XELPDP_PORT_CLOCK_CTL_USBC2, \
+							[PORT_TC3] = _XELPDP_PORT_CLOCK_CTL_USBC3, \
+							[PORT_TC4] = _XELPDP_PORT_CLOCK_CTL_USBC4))
+
+#define XELPDP_LANE0_PCLK_PLL_REQUEST			REG_BIT(31)
+#define XELPDP_LANE0_PCLK_PLL_ACK			REG_BIT(30)
+#define XELPDP_LANE0_PCLK_REFCLK_REQUEST		REG_BIT(29)
+#define XELPDP_LANE0_PCLK_REFCLK_ACK			REG_BIT(28)
+#define XELPDP_LANE1_PCLK_PLL_REQUEST			REG_BIT(27)
+#define XELPDP_LANE1_PCLK_PLL_ACK			REG_BIT(26)
+#define XELPDP_LANE1_PCLK_REFCLK_REQUEST		REG_BIT(25)
+#define XELPDP_LANE1_PCLK_REFCLK_ACK			REG_BIT(24)
+#define XELPDP_DDI_CLOCK_SELECT_MASK			REG_GENMASK(15, 12)
+#define XELPDP_DDI_CLOCK_SELECT(val)			REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
+#define XELPDP_DDI_CLOCK_SELECT_NONE			0x0
+#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK			0x8
+#define XELPDP_FORWARD_CLOCK_UNGATE			REG_BIT(10)
+#define XELPDP_LANE1_PHY_CLOCK_SELECT			REG_BIT(8)
+#define XELPDP_SSC_ENABLE_PLLA				REG_BIT(1)
+#define XELPDP_SSC_ENABLE_PLLB				REG_BIT(0)
+
 #endif /* _I915_REG_H_ */
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [Intel-gfx] [PATCH 4/5] drm/i915/mtl: Add C10 phy programming for HDMI
  2022-09-29 13:17 [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 phy support Mika Kahola
                   ` (2 preceding siblings ...)
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 3/5] drm/i915/mtl: Add support for C10 phy programming Mika Kahola
@ 2022-09-29 13:17 ` Mika Kahola
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 5/5] drm/i915/mtl: Add vswing programming for C10 phys Mika Kahola
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 17+ messages in thread
From: Mika Kahola @ 2022-09-29 13:17 UTC (permalink / raw)
  To: intel-gfx

From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>

Like DG2, we still don't have a proper algorithm that can be used
for calculating PHY settings, but we do have tables of register
values for a handful of the more common link rates. Some support is
better than none, so let's go ahead and add/use these tables when we
can, and also add some logic to hdmi_port_clock_valid() to filter the
mode list down to just the modes we can actually support with these
link rates.

Hopefully we'll have a proper, non-encumbered algorithm to calculate
these registers by the time we upstream, and we'll then be able to
replace this patch with something more general purpose.
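
As a rough illustration of the approach, a table-lookup validator
could look like the sketch below. The struct, table and function
names are placeholders invented for this description; the real
tables and helper added by this patch are the mtl_c10_hdmi_*
intel_c10mpllb_state entries and intel_c10_phy_check_hdmi_link_rate()
in the diff.

/* Sketch only: assumes the kernel's ARRAY_SIZE() and DRM's enum drm_mode_status. */
struct example_hdmi_pll {
	int clock;	/* TMDS clock in kHz */
	/* pre-computed PLL register values for this rate would live here */
};

static const struct example_hdmi_pll example_hdmi_plls[] = {
	{ .clock = 25175 },
	{ .clock = 27000 },
	{ .clock = 74250 },
	{ .clock = 148500 },
	{ .clock = 594000 },
};

/* Accept only the clocks we have register values for. */
static enum drm_mode_status example_hdmi_clock_valid(int clock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_hdmi_plls); i++)
		if (example_hdmi_plls[i].clock == clock)
			return MODE_OK;

	return MODE_CLOCK_RANGE;
}

hdmi_port_clock_valid() then rejects any mode whose TMDS clock is not
in the table, which is what filters the mode list.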

Bspec: 64568

Cc: Imre Deak <imre.deak@intel.com>
Cc: Mika Kahola <mika.kahola@intel.com>
Cc: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
---
 drivers/gpu/drm/i915/display/intel_cx0_phy.c | 168 ++++++++++++++++++-
 drivers/gpu/drm/i915/display/intel_cx0_phy.h |   2 +
 drivers/gpu/drm/i915/display/intel_hdmi.c    |   5 +-
 3 files changed, 170 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 6ba11cd7cd75..a08788d2a3bc 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -448,6 +448,152 @@ static const struct intel_c10mpllb_state * const mtl_c10_edp_tables[] = {
 	NULL,
 };
 
+/*
+ * HDMI link rates with 38.4 MHz reference clock.
+ */
+
+static const struct intel_c10mpllb_state mtl_c10_hdmi_25_175 = {
+	.clock = 25175,
+	.pll[0] = 0x4,
+	.pll[1] = 0,
+	.pll[2] = 0xB2,
+	.pll[3] = 0,
+	.pll[4] = 0,
+	.pll[5] = 0,
+	.pll[6] = 0,
+	.pll[7] = 0,
+	.pll[8] = 0x20,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0xD,
+	.pll[16] = 0x6,
+	.pll[17] = 0x8F,
+	.pll[18] = 0x84,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_hdmi_27_0 = {
+	.clock = 27000,
+	.pll[0] = 0x34,
+	.pll[1] = 0,
+	.pll[2] = 0xC0,
+	.pll[3] = 0,
+	.pll[4] = 0,
+	.pll[5] = 0,
+	.pll[6] = 0,
+	.pll[7] = 0,
+	.pll[8] = 0x20,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0x80,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0xD,
+	.pll[16] = 0x6,
+	.pll[17] = 0xCF,
+	.pll[18] = 0x84,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_hdmi_74_25 = {
+	.clock = 74250,
+	.pll[0] = 0xF4,
+	.pll[1] = 0,
+	.pll[2] = 0x7A,
+	.pll[3] = 0,
+	.pll[4] = 0,
+	.pll[5] = 0,
+	.pll[6] = 0,
+	.pll[7] = 0,
+	.pll[8] = 0x20,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0x58,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0xB,
+	.pll[16] = 0x6,
+	.pll[17] = 0xF,
+	.pll[18] = 0x85,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_hdmi_148_5 = {
+	.clock = 148500,
+	.pll[0] = 0xF4,
+	.pll[1] = 0,
+	.pll[2] = 0x7A,
+	.pll[3] = 0,
+	.pll[4] = 0,
+	.pll[5] = 0,
+	.pll[6] = 0,
+	.pll[7] = 0,
+	.pll[8] = 0x20,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0x58,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0xA,
+	.pll[16] = 0x6,
+	.pll[17] = 0xF,
+	.pll[18] = 0x85,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state mtl_c10_hdmi_594 = {
+	.clock = 594000,
+	.pll[0] = 0xF4,
+	.pll[1] = 0,
+	.pll[2] = 0x7A,
+	.pll[3] = 0,
+	.pll[4] = 0,
+	.pll[5] = 0,
+	.pll[6] = 0,
+	.pll[7] = 0,
+	.pll[8] = 0x20,
+	.pll[9] = 0x1,
+	.pll[10] = 0,
+	.pll[11] = 0,
+	.pll[12] = 0x58,
+	.pll[13] = 0,
+	.pll[14] = 0,
+	.pll[15] = 0x8,
+	.pll[16] = 0x6,
+	.pll[17] = 0xF,
+	.pll[18] = 0x85,
+	.pll[19] = 0x23,
+};
+
+static const struct intel_c10mpllb_state * const mtl_c10_hdmi_tables[] = {
+	&mtl_c10_hdmi_25_175,
+	&mtl_c10_hdmi_27_0,
+	&mtl_c10_hdmi_74_25,
+	&mtl_c10_hdmi_148_5,
+	&mtl_c10_hdmi_594,
+	NULL,
+};
+
+int intel_c10_phy_check_hdmi_link_rate(int clock)
+{
+	const struct intel_c10mpllb_state * const *tables = mtl_c10_hdmi_tables;
+	int i;
+
+	for (i = 0; tables[i]; i++) {
+		if (clock == tables[i]->clock)
+			return MODE_OK;
+	}
+
+	return MODE_CLOCK_RANGE;
+}
+
 static const struct intel_c10mpllb_state * const *
 intel_c10_mpllb_tables_get(struct intel_crtc_state *crtc_state,
 			   struct intel_encoder *encoder)
@@ -457,9 +603,10 @@ intel_c10_mpllb_tables_get(struct intel_crtc_state *crtc_state,
 			return mtl_c10_edp_tables;
 		else
 			return mtl_c10_dp_tables;
+	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+		return mtl_c10_hdmi_tables;
 	}
 
-	/* TODO: Add HDMI Support */
 	MISSING_CASE(encoder->type);
 	return NULL;
 }
@@ -467,9 +614,20 @@ intel_c10_mpllb_tables_get(struct intel_crtc_state *crtc_state,
 static int intel_c10mpllb_calc_state(struct intel_crtc_state *crtc_state,
 				     struct intel_encoder *encoder)
 {
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	const struct intel_c10mpllb_state * const *tables;
+	enum phy phy = intel_port_to_phy(i915, encoder->port);
 	int i;
 
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+		if (intel_c10_phy_check_hdmi_link_rate(crtc_state->port_clock)
+		    != MODE_OK) {
+			drm_dbg_kms(&i915->drm, "Can't support HDMI link rate %d on phy %c.\n",
+				      crtc_state->port_clock, phy_name(phy));
+			return -EINVAL;
+		}
+	}
+
 	tables = intel_c10_mpllb_tables_get(crtc_state, encoder);
 	if (!tables)
 		return -EINVAL;
@@ -521,7 +679,8 @@ void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
 	cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0));
 	tx0 = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0));
 
-	if (tx0 != C10_TX0_VAL || cmn != C10_CMN0_DP_VAL)
+	if (tx0 != C10_TX0_VAL || cmn != (intel_encoder_is_dp(encoder) ?
+					  C10_CMN0_DP_VAL : C10_CMN0_HDMI_VAL))
 		drm_warn(&i915->drm, "Unexpected tx: %x or cmn: %x for phy: %c.\n",
 			 tx0, cmn, phy_name(phy));
 }
@@ -537,11 +696,10 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
 				 INTEL_CX0_LANE0;
 	enum intel_cx0_lanes follower_lane = lane_reversal ? INTEL_CX0_LANE0 :
 				 INTEL_CX0_LANE1;
-
 	int i;
 	struct intel_dp *intel_dp;
 	bool use_ssc = false;
-	u8 cmn0 = 0;
+	u8 cmn0;
 
 	if (intel_crtc_has_dp_encoder(crtc_state)) {
 		intel_dp = enc_to_intel_dp(encoder);
@@ -552,6 +710,8 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
 			use_ssc = false;
 
 		cmn0 = C10_CMN0_DP_VAL;
+	} else {
+		cmn0 = C10_CMN0_HDMI_VAL;
 	}
 
 	intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index d12d2e2f02ee..fc8e4041f26f 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -96,6 +96,7 @@ enum intel_cx0_lanes {
 #define  C10_PLL15_TXCLKDIV_MASK	REG_GENMASK8(2, 0)
 #define PHY_C10_VDR_CMN(idx)		(0xC20 + (idx))
 #define  C10_CMN0_DP_VAL		0x21
+#define  C10_CMN0_HDMI_VAL		0x1
 #define  C10_CMN3_TXVBOOST_MASK		REG_GENMASK8(7, 5)
 #define  C10_CMN3_TXVBOOST(val)		REG_FIELD_PREP8(C10_CMN3_TXVBOOST_MASK, val)
 #define PHY_C10_VDR_TX(idx)		(0xC30 + (idx))
@@ -141,5 +142,6 @@ int intel_c10mpllb_calc_port_clock(struct intel_encoder *encoder,
 				   const struct intel_c10mpllb_state *pll_state);
 void intel_c10mpllb_state_verify(struct intel_atomic_state *state,
 				 struct intel_crtc_state *new_crtc_state);
+int intel_c10_phy_check_hdmi_link_rate(int clock);
 
 #endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 93519fb23d9d..c274098f2196 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -44,6 +44,7 @@
 #include "i915_drv.h"
 #include "intel_atomic.h"
 #include "intel_connector.h"
+#include "intel_cx0_phy.h"
 #include "intel_ddi.h"
 #include "intel_de.h"
 #include "intel_display_types.h"
@@ -1875,7 +1876,9 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
 	 * FIXME: We will hopefully get an algorithmic way of programming
 	 * the MPLLB for HDMI in the future.
 	 */
-	if (IS_DG2(dev_priv))
+	if (IS_METEORLAKE(dev_priv))
+		return intel_c10_phy_check_hdmi_link_rate(clock);
+	else if (IS_DG2(dev_priv))
 		return intel_snps_phy_check_hdmi_link_rate(clock);
 
 	return MODE_OK;
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [Intel-gfx] [PATCH 5/5] drm/i915/mtl: Add vswing programming for C10 phys
  2022-09-29 13:17 [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 phy support Mika Kahola
                   ` (3 preceding siblings ...)
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 4/5] drm/i915/mtl: Add C10 phy programming for HDMI Mika Kahola
@ 2022-09-29 13:17 ` Mika Kahola
  2022-09-29 19:46 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915/mtl: Add C10 phy support Patchwork
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 17+ messages in thread
From: Mika Kahola @ 2022-09-29 13:17 UTC (permalink / raw)
  To: intel-gfx

From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>

C10 phys use direct mapping internally for voltage and pre-emphasis levels.
Program the levels directly into the fields of the VDR registers.
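
As a rough sketch of what "direct mapping" means here (the EXAMPLE_*
masks and the helper below are made up for illustration, not the real
VDR/TX register layout):

/* Sketch only: GENMASK() and FIELD_PREP() come from <linux/bits.h> and <linux/bitfield.h>. */
#define EXAMPLE_TX_VSWING_MASK		GENMASK(2, 0)
#define EXAMPLE_TX_PREEMPH_MASK		GENMASK(1, 0)

/*
 * No per-platform tuning table: the requested levels are written
 * straight into the per-TX register fields.
 */
static void example_set_tx_levels(u8 *preemph_reg, u8 *vswing_reg,
				  u8 preemph, u8 vswing)
{
	*preemph_reg &= ~EXAMPLE_TX_PREEMPH_MASK;
	*preemph_reg |= FIELD_PREP(EXAMPLE_TX_PREEMPH_MASK, preemph);

	*vswing_reg &= ~EXAMPLE_TX_VSWING_MASK;
	*vswing_reg |= FIELD_PREP(EXAMPLE_TX_VSWING_MASK, vswing);
}

The actual code below does the equivalent read-modify-write over the
message bus with intel_cx0_rmw() on the PHY_CX0_TX_CONTROL registers,
one pair of writes per transmitter.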

Bspec: 65449

Cc: Imre Deak <imre.deak@intel.com>
Cc: Mika Kahola <mika.kahola@intel.com>
Cc: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Clint Taylor <Clinton.A.Taylor@intel.com>
Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
---
 drivers/gpu/drm/i915/display/intel_cx0_phy.c  | 157 +++++++++++++++---
 drivers/gpu/drm/i915/display/intel_cx0_phy.h  |   8 +
 drivers/gpu/drm/i915/display/intel_ddi.c      |   4 +-
 .../drm/i915/display/intel_ddi_buf_trans.c    |  36 +++-
 .../drm/i915/display/intel_ddi_buf_trans.h    |   6 +
 .../i915/display/intel_display_power_map.c    |   1 +
 drivers/gpu/drm/i915/i915_reg.h               |   1 +
 7 files changed, 187 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index a08788d2a3bc..bba3c4579cfa 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -4,12 +4,24 @@
  */
 
 #include "intel_cx0_phy.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
 #include "intel_de.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_panel.h"
+#include "intel_psr.h"
 #include "intel_uncore.h"
 
+static void
+assert_dc_off(struct drm_i915_private *i915)
+{
+	bool enabled;
+
+	enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF);
+	drm_WARN_ON(&i915->drm, !enabled);
+}
+
 static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
@@ -37,6 +49,8 @@ static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
 	u32 val = 0;
 	int attempts = 0;
 
+	assert_dc_off(i915);
+
 retry:
 	if (attempts == 3) {
 		drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries. Status: 0x%x\n", phy_name(phy), addr, attempts, val ?: 0);
@@ -126,6 +140,8 @@ static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
 	enum phy phy = intel_port_to_phy(i915, port);
 	int attempts = 0;
 
+	assert_dc_off(i915);
+
 retry:
 	if (attempts == 3) {
 		drm_err_once(&i915->drm, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, attempts);
@@ -204,6 +220,76 @@ static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
 	}
 }
 
+/*
+ * Prepare HW for CX0 phy transactions.
+ *
+ * It is required that PSR and DC5/6 are disabled before any CX0 message
+ * bus transaction is executed.
+ */
+static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+	intel_psr_pause(intel_dp);
+	return intel_display_power_get(i915, POWER_DOMAIN_DC_OFF);
+}
+
+static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+	intel_psr_resume(intel_dp);
+	intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref);
+}
+
+void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
+				     const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+	enum intel_cx0_lanes master_lane = lane_reversal ? INTEL_CX0_LANE1 :
+				 INTEL_CX0_LANE0;
+	const struct intel_ddi_buf_trans *trans;
+	intel_wakeref_t wakeref;
+	int n_entries, ln;
+
+	wakeref = intel_cx0_phy_transaction_begin(encoder);
+
+	trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+	if (drm_WARN_ON_ONCE(&i915->drm, !trans))
+		return;
+
+	intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+		      0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
+
+	for (ln = 0; ln < 4; ln++) {
+		int level = intel_ddi_level(encoder, crtc_state, ln);
+		int lane, tx;
+
+		lane = ln / 2;
+		tx = ln % 2 + 1;
+
+		intel_cx0_rmw(i915, encoder->port, lane, PHY_CX0_TX_CONTROL(tx, 2),
+			      C10_PHY_VSWING_PREEMPH_MASK,
+			      C10_PHY_VSWING_PREEMPH(trans->entries[level].direct.preemph),
+			      MB_WRITE_COMMITTED);
+		intel_cx0_rmw(i915, encoder->port, lane, PHY_CX0_TX_CONTROL(tx, 8),
+			      C10_PHY_VSWING_LEVEL_MASK,
+			      C10_PHY_VSWING_LEVEL(trans->entries[level].direct.level),
+			      MB_WRITE_COMMITTED);
+	}
+
+	intel_cx0_write(i915, encoder->port, !master_lane, PHY_C10_VDR_CONTROL(1),
+		 C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
+	intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_CONTROL(1),
+			C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
+			MB_WRITE_COMMITTED);
+	intel_cx0_phy_transaction_end(encoder, wakeref);
+}
+
 /*
  * Basic DP link rates with 38.4 MHz reference clock.
  * Note: The tables below are with SSC. In non-ssc
@@ -662,9 +748,12 @@ void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
 	enum intel_cx0_lanes lane = lane_reversal ? INTEL_CX0_LANE1 :
 				    INTEL_CX0_LANE0;
 	enum phy phy = intel_port_to_phy(i915, encoder->port);
+	intel_wakeref_t wakeref;
 	int i;
 	u8 cmn, tx0;
 
+	wakeref = intel_cx0_phy_transaction_begin(encoder);
+
 	/*
 	 * According to C10 VDR Register programming Sequence we need
 	 * to do this to read PHY internal registers from MsgBus.
@@ -683,6 +772,8 @@ void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
 					  C10_CMN0_DP_VAL : C10_CMN0_HDMI_VAL))
 		drm_warn(&i915->drm, "Unexpected tx: %x or cmn: %x for phy: %c.\n",
 			 tx0, cmn, phy_name(phy));
+
+	intel_cx0_phy_transaction_end(encoder, wakeref);
 }
 
 static void intel_c10_pll_program(struct drm_i915_private *i915,
@@ -839,17 +930,20 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
 
 	if (intel_crtc_has_dp_encoder(crtc_state)) {
 		intel_dp = enc_to_intel_dp(encoder);
-		ssc_enabled = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
-			      DP_MAX_DOWNSPREAD_0_5;
+		ssc_enabled = (intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+			      DP_MAX_DOWNSPREAD_0_5);
+
+		if (intel_dp_is_edp(intel_dp) && !intel_panel_use_ssc(i915))
+			ssc_enabled = false;
 
 		/* TODO: DP2.0 10G and 20G rates enable MPLLA*/
 		val |= ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
 	}
+
 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
-		     XELPDP_LANE1_PHY_CLOCK_SELECT |
-		     XELPDP_FORWARD_CLOCK_UNGATE |
+		     XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
 		     XELPDP_DDI_CLOCK_SELECT_MASK |
-		     XELPDP_SSC_ENABLE_PLLB, val);
+		     XELPDP_SSC_ENABLE_PLLA | XELPDP_SSC_ENABLE_PLLB, val);
 }
 
 static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
@@ -859,16 +953,16 @@ static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
 	enum phy phy = intel_port_to_phy(i915, port);
 
 	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
-		     PHY_LANES_VAL(POWERDOWN_NEW_STATE_MASK, lane),
+		     PHY_LANES_VAL(POWERDOWN_NEW_STATE_MASK, INTEL_CX0_BOTH_LANES),
 		     PHY_LANES_VAL_ARG(POWERDOWN_NEW_STATE, lane, state));
 	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
-		     PHY_LANES_VAL(POWERDOWN_UPDATE, lane),
+		     PHY_LANES_VAL(POWERDOWN_UPDATE, INTEL_CX0_BOTH_LANES),
 		     PHY_LANES_VAL(POWERDOWN_UPDATE, lane));
 
 	/* Update Timeout Value */
 	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_BUF_CTL2(port),
 				      PHY_LANES_VAL(POWERDOWN_UPDATE, lane), 0,
-				      XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
+				      XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
 		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
 			 phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
 }
@@ -912,11 +1006,11 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, enum port po
 			 phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
 
 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
-		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST, lane),
+		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST, INTEL_CX0_BOTH_LANES),
 		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST, lane));
 
 	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_CLOCK_CTL(port),
-				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane),
+				      PHY_LANES_VAL(PCLK_REFCLK_ACK, INTEL_CX0_BOTH_LANES),
 				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane),
 				      XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
 		drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
@@ -943,9 +1037,12 @@ static void intel_c10_program_phy_lane(struct drm_i915_private *i915,
 {
 	u8 l0t1, l0t2, l1t1, l1t2;
 
-	intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
-		      C10_VDR_CTRL_MSGBUS_ACCESS, C10_VDR_CTRL_MSGBUS_ACCESS,
-		      MB_WRITE_COMMITTED);
+	intel_cx0_rmw(i915, port, 1, PHY_C10_VDR_CONTROL(1),
+		      C10_VDR_CTRL_MSGBUS_ACCESS | C10_VDR_CTRL_UPDATE_CFG,
+		      C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
+	intel_cx0_rmw(i915, port, 0, PHY_C10_VDR_CONTROL(1),
+		      C10_VDR_CTRL_MSGBUS_ACCESS | C10_VDR_CTRL_UPDATE_CFG,
+		      C10_VDR_CTRL_MASTER_LANE  | C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
 
 	l0t1 = intel_cx0_read(i915, port, 0, PHY_CX0_TX_CONTROL(1, 2));
 	l0t2 = intel_cx0_read(i915, port, 0, PHY_CX0_TX_CONTROL(2, 2));
@@ -996,8 +1093,12 @@ static void intel_c10_program_phy_lane(struct drm_i915_private *i915,
 		}
 	}
 
-	intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
-		      C10_VDR_CTRL_UPDATE_CFG, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
+	intel_cx0_rmw(i915, port, 1, PHY_C10_VDR_CONTROL(1),
+		      C10_VDR_CTRL_UPDATE_CFG | C10_VDR_CTRL_MSGBUS_ACCESS,
+		      C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
+	intel_cx0_rmw(i915, port, 0, PHY_C10_VDR_CONTROL(1),
+		      C10_VDR_CTRL_UPDATE_CFG | C10_VDR_CTRL_MSGBUS_ACCESS,
+		      C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
 }
 
 static void intel_c10pll_enable(struct intel_encoder *encoder,
@@ -1050,12 +1151,13 @@ static void intel_c10pll_enable(struct intel_encoder *encoder,
 	 * 8. Set PORT_CLOCK_CTL register PCLK PLL Request
 	 * LN<Lane for maxPCLK> to "1" to enable PLL.
 	 */
-	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), 0,
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+		     PHY_LANES_VAL(PCLK_PLL_REQUEST, INTEL_CX0_BOTH_LANES),
 		     PHY_LANES_VAL(PCLK_PLL_REQUEST, maxpclk_lane));
 
 	/* 9. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
 	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_CLOCK_CTL(encoder->port),
-				      PHY_LANES_VAL(PCLK_PLL_ACK, maxpclk_lane),
+				      PHY_LANES_VAL(PCLK_PLL_ACK, INTEL_CX0_BOTH_LANES),
 				      PHY_LANES_VAL(PCLK_PLL_ACK, maxpclk_lane),
 				      XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
 		drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n",
@@ -1072,19 +1174,20 @@ void intel_cx0pll_enable(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	enum phy phy = intel_port_to_phy(i915, encoder->port);
+	intel_wakeref_t wakeref;
+
+	wakeref = intel_cx0_phy_transaction_begin(encoder);
 
 	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
 	intel_c10pll_enable(encoder, crtc_state);
+
+	intel_cx0_phy_transaction_end(encoder, wakeref);
 }
 
 static void intel_c10pll_disable(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	enum phy phy = intel_port_to_phy(i915, encoder->port);
-	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
-	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-	enum intel_cx0_lanes lane = lane_reversal ? INTEL_CX0_LANE1 :
-				    INTEL_CX0_LANE0;
 
 	/* 1. Change owned PHY lane power to Disable state. */
 	intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
@@ -1110,8 +1213,8 @@ static void intel_c10pll_disable(struct intel_encoder *encoder)
 	 * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
 	 */
 	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_CLOCK_CTL(encoder->port),
-				      PHY_LANES_VAL(PCLK_PLL_ACK, lane) |
-				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane), 0,
+				      PHY_LANES_VAL(PCLK_PLL_ACK, INTEL_CX0_BOTH_LANES) |
+				      PHY_LANES_VAL(PCLK_REFCLK_ACK, INTEL_CX0_BOTH_LANES), 0,
 				      XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
 		drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n",
 			 phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
@@ -1123,7 +1226,8 @@ static void intel_c10pll_disable(struct intel_encoder *encoder)
 
 	/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
-		     XELPDP_DDI_CLOCK_SELECT_MASK |
+		     XELPDP_DDI_CLOCK_SELECT_MASK, 0);
+	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
 		     XELPDP_FORWARD_CLOCK_UNGATE, 0);
 }
 
@@ -1131,9 +1235,14 @@ void intel_cx0pll_disable(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	enum phy phy = intel_port_to_phy(i915, encoder->port);
+	intel_wakeref_t wakeref;
+
+	wakeref = intel_cx0_phy_transaction_begin(encoder);
 
 	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
 	intel_c10pll_disable(encoder);
+
+	intel_cx0_phy_transaction_end(encoder, wakeref);
 }
 
 #undef PHY_LANES_VAL_ARG
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index fc8e4041f26f..9816449e3931 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -120,6 +120,12 @@ enum intel_cx0_lanes {
 #define PHY_CX0_TX_CONTROL(tx, control)	(0x400 + ((tx) - 1) * 0x200 + (control))
 #define CONTROL2_DISABLE_SINGLE_TX	REG_BIT(6)
 
+/* C10 Phy VSWING Masks */
+#define C10_PHY_VSWING_LEVEL_MASK		REG_GENMASK8(2, 0)
+#define C10_PHY_VSWING_LEVEL(val)		REG_FIELD_PREP8(C10_PHY_VSWING_LEVEL_MASK, val)
+#define C10_PHY_VSWING_PREEMPH_MASK		REG_GENMASK8(1, 0)
+#define C10_PHY_VSWING_PREEMPH(val)		REG_FIELD_PREP8(C10_PHY_VSWING_PREEMPH_MASK, val)
+
 static inline bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy)
 {
 	if (!IS_METEORLAKE(dev_priv))
@@ -143,5 +149,7 @@ int intel_c10mpllb_calc_port_clock(struct intel_encoder *encoder,
 void intel_c10mpllb_state_verify(struct intel_atomic_state *state,
 				 struct intel_crtc_state *new_crtc_state);
 int intel_c10_phy_check_hdmi_link_rate(int clock);
+void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
+				     const struct intel_crtc_state *crtc_state);
 
 #endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 639ec604babf..1380ed2221ad 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -4445,7 +4445,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 		encoder->get_config = hsw_ddi_get_config;
 	}
 
-	if (IS_DG2(dev_priv)) {
+	if (DISPLAY_VER(dev_priv) >= 14) {
+		encoder->set_signal_levels = intel_cx0_phy_set_signal_levels;
+	} else if (IS_DG2(dev_priv)) {
 		encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
 	} else if (DISPLAY_VER(dev_priv) >= 12) {
 		if (intel_phy_is_combo(dev_priv, phy))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 006a2e979000..49f8a0a6593b 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -1035,6 +1035,30 @@ static const struct intel_ddi_buf_trans dg2_snps_trans_uhbr = {
 	.num_entries = ARRAY_SIZE(_dg2_snps_trans_uhbr),
 };
 
+/*
+ * Some platforms don't need a mapping table and only expect us
+ * to program the vswing + preemphasis levels directly since the
+ * hardware will do its own mapping to tuning values internally.
+ */
+static const union intel_ddi_buf_trans_entry direct_map_trans[] = {
+    { .direct = { .level = 0, .preemph = 0 } },
+    { .direct = { .level = 0, .preemph = 1 } },
+    { .direct = { .level = 0, .preemph = 2 } },
+    { .direct = { .level = 0, .preemph = 3 } },
+    { .direct = { .level = 1, .preemph = 0 } },
+    { .direct = { .level = 1, .preemph = 1 } },
+    { .direct = { .level = 1, .preemph = 2 } },
+    { .direct = { .level = 2, .preemph = 0 } },
+    { .direct = { .level = 2, .preemph = 1 } },
+    { .direct = { .level = 3, .preemph = 0 } },
+};
+
+static const struct intel_ddi_buf_trans mtl_cx0c10_trans = {
+	.entries = direct_map_trans,
+	.num_entries = ARRAY_SIZE(direct_map_trans),
+	.hdmi_default_entry = ARRAY_SIZE(direct_map_trans) - 1,
+};
+
 bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
 {
 	return table == &tgl_combo_phy_trans_edp_hbr2_hobl;
@@ -1606,12 +1630,22 @@ dg2_get_snps_buf_trans(struct intel_encoder *encoder,
 		return intel_get_buf_trans(&dg2_snps_trans, n_entries);
 }
 
+static const struct intel_ddi_buf_trans *
+mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
+		      const struct intel_crtc_state *crtc_state,
+		      int *n_entries)
+{
+	return intel_get_buf_trans(&mtl_cx0c10_trans, n_entries);
+}
+
 void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-	if (IS_DG2(i915)) {
+	if (DISPLAY_VER(i915) >= 14) {
+		encoder->get_buf_trans = mtl_get_cx0_buf_trans;
+	} else if (IS_DG2(i915)) {
 		encoder->get_buf_trans = dg2_get_snps_buf_trans;
 	} else if (IS_ALDERLAKE_P(i915)) {
 		if (intel_phy_is_combo(i915, phy))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index 2133984a572b..e4a857b9829d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -51,6 +51,11 @@ struct dg2_snps_phy_buf_trans {
 	u8 post_cursor;
 };
 
+struct direct_phy_buf_trans {
+	u8 level;
+	u8 preemph;
+};
+
 union intel_ddi_buf_trans_entry {
 	struct hsw_ddi_buf_trans hsw;
 	struct bxt_ddi_buf_trans bxt;
@@ -58,6 +63,7 @@ union intel_ddi_buf_trans_entry {
 	struct icl_mg_phy_ddi_buf_trans mg;
 	struct tgl_dkl_phy_ddi_buf_trans dkl;
 	struct dg2_snps_phy_buf_trans snps;
+	struct direct_phy_buf_trans direct;
 };
 
 struct intel_ddi_buf_trans {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index dc04afc6cc8f..45c3ab4e2f28 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1374,6 +1374,7 @@ I915_DECL_PW_DOMAINS(xelpdp_pwdoms_dc_off,
 	XELPDP_PW_2_POWER_DOMAINS,
 	POWER_DOMAIN_AUDIO_MMIO,
 	POWER_DOMAIN_MODESET,
+	POWER_DOMAIN_DC_OFF,
 	POWER_DOMAIN_AUX_A,
 	POWER_DOMAIN_AUX_B,
 	POWER_DOMAIN_INIT);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5e6ff9f2aa10..bafd70fb96bd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8442,6 +8442,7 @@ enum skl_power_gate {
 #define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US		20
 #define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US		100
 #define XELPDP_PORT_RESET_START_TIMEOUT_US		5
+#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US		100
 #define XELPDP_PORT_RESET_END_TIMEOUT			15
 #define XELPDP_REFCLK_ENABLE_TIMEOUT_US			1
 
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915/mtl: Add C10 phy support
  2022-09-29 13:17 [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 phy support Mika Kahola
                   ` (4 preceding siblings ...)
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 5/5] drm/i915/mtl: Add vswing programming for C10 phys Mika Kahola
@ 2022-09-29 19:46 ` Patchwork
  2022-09-29 20:08 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
  2022-09-30 21:08 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
  7 siblings, 0 replies; 17+ messages in thread
From: Patchwork @ 2022-09-29 19:46 UTC (permalink / raw)
  To: Mika Kahola; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/mtl: Add C10 phy support
URL   : https://patchwork.freedesktop.org/series/109248/
State : warning

== Summary ==

Error: dim checkpatch failed
7a245d0165a3 drm/i915/mtl: Add Support for C10, C20 PHY Message Bus
Traceback (most recent call last):
  File "scripts/spdxcheck.py", line 6, in <module>
    from ply import lex, yacc
ModuleNotFoundError: No module named 'ply'
-:19: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#19: 
new file mode 100644

-:44: WARNING:QUOTED_WHITESPACE_BEFORE_NEWLINE: unnecessary whitespace before a quoted newline
#44: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:21:
+		drm_err_once(&i915->drm, "Failed to bring PHY %c to idle. \n", phy_name(phy));

-:50: WARNING:RETURN_VOID: void function return statements are not generally useful
#50: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:27:
+	return;
+}

-:61: WARNING:LONG_LINE: line length of 142 exceeds 100 columns
#61: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:38:
+		drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries. Status: 0x%x\n", phy_name(phy), addr, attempts, val ?: 0);

-:69: WARNING:LONG_LINE: line length of 142 exceeds 100 columns
#69: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:46:
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));

-:88: WARNING:LONG_LINE: line length of 120 exceeds 100 columns
#88: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:65:
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Read response ACK. Status: 0x%x\n", phy_name(phy), val);

-:96: WARNING:LONG_LINE: line length of 117 exceeds 100 columns
#96: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:73:
+		drm_dbg(&i915->drm, "PHY %c Error occurred during read command. Status: 0x%x\n", phy_name(phy), val);

-:105: WARNING:LONG_LINE: line length of 110 exceeds 100 columns
#105: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:82:
+		drm_dbg(&i915->drm, "PHY %c Not a Read response. MSGBUS Status: 0x%x.\n", phy_name(phy), val);

-:117: CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis
#117: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:94:
+static int intel_cx0_wait_cwrite_ack(struct drm_i915_private *i915,
+				      enum port port, int lane)

-:129: WARNING:LONG_LINE: line length of 124 exceeds 100 columns
#129: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:106:
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Committed message ACK. Status: 0x%x\n", phy_name(phy), val);

-:135: WARNING:LONG_LINE: line length of 114 exceeds 100 columns
#135: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:112:
+		drm_dbg(&i915->drm, "PHY %c Unexpected ACK received. MSGBUS STATUS: 0x%x.\n", phy_name(phy), val);

-:150: WARNING:LONG_LINE: line length of 120 exceeds 100 columns
#150: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:127:
+		drm_err_once(&i915->drm, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, attempts);

-:158: WARNING:LONG_LINE: line length of 142 exceeds 100 columns
#158: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:135:
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));

-:190: WARNING:RETURN_VOID: void function return statements are not generally useful
#190: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:167:
+	return;
+}

total: 0 errors, 13 warnings, 1 checks, 179 lines checked
afd046f59d87 drm/i915/mtl: Add PLL programming support for C10 phy
-:444: ERROR:CODE_INDENT: code indent should use tabs where possible
#444: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:558:
+^I^I        C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);$

-:444: CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis
#444: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:558:
+	intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+		        C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);

-:459: WARNING:LONG_LINE: line length of 104 exceeds 100 columns
#459: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:573:
+	intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_CMN(0), cmn0, MB_WRITE_COMMITTED);

-:460: WARNING:LONG_LINE: line length of 110 exceeds 100 columns
#460: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:574:
+	intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_TX(0), C10_TX0_VAL, MB_WRITE_COMMITTED);

-:496: CHECK:SPACING: spaces preferred around that '+' (ctx:VxW)
#496: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:610:
+			    i, hw_state->pll[i], i + 1, hw_state->pll[i+ 1],
 			                                               ^

-:573: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#573: 
new file mode 100644

-:578: WARNING:SPDX_LICENSE_TAG: Improper SPDX comment style for 'drivers/gpu/drm/i915/display/intel_cx0_phy.h', please use '/*' instead
#578: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.h:1:
+// SPDX-License-Identifier: MIT

-:578: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#578: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.h:1:
+// SPDX-License-Identifier: MIT

-:601: CHECK:MACRO_ARG_REUSE: Macro argument reuse '__n' - possible side-effects?
#601: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.h:24:
+#define REG_BIT8(__n)							\
+	((u8)(BIT(__n) +						\
+	       BUILD_BUG_ON_ZERO(__is_constexpr(__n) &&		\
+				 ((__n) < 0 || (__n) > 7))))

-:615: CHECK:MACRO_ARG_REUSE: Macro argument reuse '__high' - possible side-effects?
#615: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.h:38:
+#define REG_GENMASK8(__high, __low)					\
+	((u8)(GENMASK(__high, __low) +					\
+	       BUILD_BUG_ON_ZERO(__is_constexpr(__high) &&	\
+				 __is_constexpr(__low) &&		\
+				 ((__low) < 0 || (__high) > 7 || (__low) > (__high)))))

-:615: CHECK:MACRO_ARG_REUSE: Macro argument reuse '__low' - possible side-effects?
#615: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.h:38:
+#define REG_GENMASK8(__high, __low)					\
+	((u8)(GENMASK(__high, __low) +					\
+	       BUILD_BUG_ON_ZERO(__is_constexpr(__high) &&	\
+				 __is_constexpr(__low) &&		\
+				 ((__low) < 0 || (__high) > 7 || (__low) > (__high)))))

-:624: CHECK:MACRO_ARG_REUSE: Macro argument reuse '__x' - possible side-effects?
#624: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.h:47:
+#define IS_POWER_OF_2(__x)		((__x) && (((__x) & ((__x) - 1)) == 0))

-:636: CHECK:MACRO_ARG_REUSE: Macro argument reuse '__mask' - possible side-effects?
#636: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.h:59:
+#define REG_FIELD_PREP8(__mask, __val)						\
+	((u8)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) +	\
+	       BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) +		\
+	       BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U8_MAX) +		\
+	       BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+	       BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))

-:636: CHECK:MACRO_ARG_REUSE: Macro argument reuse '__val' - possible side-effects?
#636: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.h:59:
+#define REG_FIELD_PREP8(__mask, __val)						\
+	((u8)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) +	\
+	       BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) +		\
+	       BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U8_MAX) +		\
+	       BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+	       BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))

-:641: WARNING:LONG_LINE: line length of 128 exceeds 100 columns
#641: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.h:64:
+	       BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))

-:732: WARNING:LONG_LINE: line length of 102 exceeds 100 columns
#732: FILE: drivers/gpu/drm/i915/display/intel_ddi.c:3501:
+	crtc_state->port_clock = intel_c10mpllb_calc_port_clock(encoder, &crtc_state->c10mpllb_state);

total: 1 errors, 7 warnings, 8 checks, 818 lines checked
e6d269ba2195 drm/i915/mtl: Add support for C10 phy programming
-:50: ERROR:SPACING: space required before the open parenthesis '('
#50: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:636:
+#define PHY_LANES_VAL_ARG(FIELD, lanes, arg)	({u32 __val; switch(lanes) {\

-:50: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'arg' - possible side-effects?
#50: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:636:
+#define PHY_LANES_VAL_ARG(FIELD, lanes, arg)	({u32 __val; switch(lanes) {\
+						  case INTEL_CX0_BOTH_LANES:	\
+							__val = ((XELPDP_LANE0_##FIELD(arg)) |\
+							        (XELPDP_LANE1_##FIELD(arg))); \
+							break;				\
+						  case INTEL_CX0_LANE0:         \
+							__val = (XELPDP_LANE0_##FIELD(arg));\
+							break;				\
+						  case INTEL_CX0_LANE1:         \
+							__val = (XELPDP_LANE1_##FIELD(arg));\
+							break;  \
+						 }; __val; })

-:51: WARNING:TABSTOP: Statements should start on a tabstop
#51: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:637:
+						  case INTEL_CX0_BOTH_LANES:	\

-:53: ERROR:CODE_INDENT: code indent should use tabs where possible
#53: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:639:
+^I^I^I^I^I^I^I        (XELPDP_LANE1_##FIELD(arg))); \$

-:55: WARNING:TABSTOP: Statements should start on a tabstop
#55: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:641:
+						  case INTEL_CX0_LANE0:         \

-:58: WARNING:TABSTOP: Statements should start on a tabstop
#58: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:644:
+						  case INTEL_CX0_LANE1:         \

-:63: ERROR:SPACING: space required before the open parenthesis '('
#63: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:649:
+#define PHY_LANES_VAL(FIELD, lanes)	({u32 __val; switch(lanes) {\

-:64: WARNING:TABSTOP: Statements should start on a tabstop
#64: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:650:
+						  case INTEL_CX0_BOTH_LANES:	\

-:66: ERROR:CODE_INDENT: code indent should use tabs where possible
#66: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:652:
+^I^I^I^I^I^I^I        XELPDP_LANE1_##FIELD); \$

-:68: WARNING:TABSTOP: Statements should start on a tabstop
#68: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:654:
+						  case INTEL_CX0_LANE0:         \

-:71: WARNING:TABSTOP: Statements should start on a tabstop
#71: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:657:
+						  case INTEL_CX0_LANE1:         \

-:548: WARNING:LONG_LINE: line length of 103 exceeds 100 columns
#548: FILE: drivers/gpu/drm/i915/i915_reg.h:8411:
+							[PORT_A] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \

-:549: WARNING:LONG_LINE: line length of 103 exceeds 100 columns
#549: FILE: drivers/gpu/drm/i915/i915_reg.h:8412:
+							[PORT_B] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \

-:550: WARNING:LONG_LINE: line length of 109 exceeds 100 columns
#550: FILE: drivers/gpu/drm/i915/i915_reg.h:8413:
+							[PORT_TC1] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \

-:551: WARNING:LONG_LINE: line length of 109 exceeds 100 columns
#551: FILE: drivers/gpu/drm/i915/i915_reg.h:8414:
+							[PORT_TC2] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2, \

-:552: WARNING:LONG_LINE: line length of 109 exceeds 100 columns
#552: FILE: drivers/gpu/drm/i915/i915_reg.h:8415:
+							[PORT_TC3] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC3, \

-:553: WARNING:LONG_LINE: line length of 123 exceeds 100 columns
#553: FILE: drivers/gpu/drm/i915/i915_reg.h:8416:
+							[PORT_TC4] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC4) + ((lane) * 4))

-:555: WARNING:LONG_LINE: line length of 102 exceeds 100 columns
#555: FILE: drivers/gpu/drm/i915/i915_reg.h:8418:
+#define XELPDP_PORT_M2P_MSGBUS_CTL(port, lane)		_MMIO(_XELPDP_PORT_M2P_MSGBUS_CTL(port, lane))

-:558: WARNING:LONG_LINE: line length of 110 exceeds 100 columns
#558: FILE: drivers/gpu/drm/i915/i915_reg.h:8421:
+#define  XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)

-:559: WARNING:LONG_LINE: line length of 110 exceeds 100 columns
#559: FILE: drivers/gpu/drm/i915/i915_reg.h:8422:
+#define  XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2)

-:560: WARNING:LONG_LINE: line length of 110 exceeds 100 columns
#560: FILE: drivers/gpu/drm/i915/i915_reg.h:8423:
+#define  XELPDP_PORT_M2P_COMMAND_READ			REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3)

-:562: WARNING:LONG_LINE: line length of 102 exceeds 100 columns
#562: FILE: drivers/gpu/drm/i915/i915_reg.h:8425:
+#define  XELPDP_PORT_M2P_DATA(val)			REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val)

-:565: WARNING:LONG_LINE: line length of 105 exceeds 100 columns
#565: FILE: drivers/gpu/drm/i915/i915_reg.h:8428:
+#define  XELPDP_PORT_M2P_ADDRESS(val)			REG_FIELD_PREP(XELPDP_PORT_M2P_ADDRESS_MASK, val)

-:567: WARNING:LONG_LINE: line length of 106 exceeds 100 columns
#567: FILE: drivers/gpu/drm/i915/i915_reg.h:8430:
+#define XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)	_MMIO(_XELPDP_PORT_M2P_MSGBUS_CTL(port, lane) + 8)

-:573: WARNING:LONG_LINE: line length of 102 exceeds 100 columns
#573: FILE: drivers/gpu/drm/i915/i915_reg.h:8436:
+#define  XELPDP_PORT_P2M_DATA(val)			REG_FIELD_PREP(XELPDP_PORT_P2M_DATA_MASK, val)

-:594: WARNING:LONG_LINE: line length of 103 exceeds 100 columns
#594: FILE: drivers/gpu/drm/i915/i915_reg.h:8457:
+							[PORT_TC1] = _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \

-:595: WARNING:LONG_LINE: line length of 103 exceeds 100 columns
#595: FILE: drivers/gpu/drm/i915/i915_reg.h:8458:
+							[PORT_TC2] = _XELPDP_PORT_BUF_CTL1_LN0_USBC2, \

-:596: WARNING:LONG_LINE: line length of 103 exceeds 100 columns
#596: FILE: drivers/gpu/drm/i915/i915_reg.h:8459:
+							[PORT_TC3] = _XELPDP_PORT_BUF_CTL1_LN0_USBC3, \

-:597: WARNING:LONG_LINE: line length of 102 exceeds 100 columns
#597: FILE: drivers/gpu/drm/i915/i915_reg.h:8460:
+							[PORT_TC4] = _XELPDP_PORT_BUF_CTL1_LN0_USBC4))

-:613: WARNING:LONG_LINE: line length of 114 exceeds 100 columns
#613: FILE: drivers/gpu/drm/i915/i915_reg.h:8476:
+#define  XELPDP_LANE0_POWERDOWN_NEW_STATE(val)		REG_FIELD_PREP(XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK, val)

-:615: WARNING:LONG_LINE: line length of 114 exceeds 100 columns
#615: FILE: drivers/gpu/drm/i915/i915_reg.h:8478:
+#define  XELPDP_LANE1_POWERDOWN_NEW_STATE(val)		REG_FIELD_PREP(XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK, val)

-:617: WARNING:LONG_LINE: line length of 106 exceeds 100 columns
#617: FILE: drivers/gpu/drm/i915/i915_reg.h:8480:
+#define  XELPDP_POWER_STATE_READY(val)			REG_FIELD_PREP(XELPDP_POWER_STATE_READY_MASK, val)

-:621: WARNING:LONG_LINE: line length of 114 exceeds 100 columns
#621: FILE: drivers/gpu/drm/i915/i915_reg.h:8484:
+#define  XELPDP_PLL_LANE_STAGGERING_DELAY(val)		REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)

-:623: WARNING:LONG_LINE: line length of 107 exceeds 100 columns
#623: FILE: drivers/gpu/drm/i915/i915_reg.h:8486:
+#define  XELPDP_POWER_STATE_ACTIVE(val)			REG_FIELD_PREP(XELPDP_POWER_STATE_ACTIVE_MASK, val)

-:648: WARNING:LONG_LINE: line length of 105 exceeds 100 columns
#648: FILE: drivers/gpu/drm/i915/i915_reg.h:8511:
+#define XELPDP_DDI_CLOCK_SELECT(val)			REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)

total: 4 errors, 30 warnings, 1 checks, 600 lines checked
c4154b7d3dd6 drm/i915/mtl: Add C10 phy programming for HDMI
-:207: CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis
#207: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:626:
+			drm_dbg_kms(&i915->drm, "Can't support HDMI link rate %d on phy %c.\n",
+				      crtc_state->port_clock, phy_name(phy));

total: 0 errors, 0 warnings, 1 checks, 242 lines checked
ad6f986d0965 drm/i915/mtl: Add vswing programming for C10 phys
-:6: WARNING:COMMIT_LOG_LONG_LINE: Possible unwrapped commit description (prefer a maximum 75 chars per line)
#6: 
C10 phys uses direct mapping internally for voltage and pre-emphasis levels.

-:131: CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis
#131: FILE: drivers/gpu/drm/i915/display/intel_cx0_phy.c:286:
+	intel_cx0_write(i915, encoder->port, !master_lane, PHY_C10_VDR_CONTROL(1),
+		 C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);

-:381: WARNING:REPEATED_WORD: Possible repeated word: 'to'
#381: FILE: drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c:1040:
+ * Some platforms don't need a mapping table and only expect us to
+ * to program the vswing + preemphasis levels directly since the

-:385: WARNING:LEADING_SPACE: please, no spaces at the start of a line
#385: FILE: drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c:1044:
+    { .direct = { .level = 0, .preemph = 0 } },$

-:386: WARNING:LEADING_SPACE: please, no spaces at the start of a line
#386: FILE: drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c:1045:
+    { .direct = { .level = 0, .preemph = 1 } },$

-:387: WARNING:LEADING_SPACE: please, no spaces at the start of a line
#387: FILE: drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c:1046:
+    { .direct = { .level = 0, .preemph = 2 } },$

-:388: WARNING:LEADING_SPACE: please, no spaces at the start of a line
#388: FILE: drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c:1047:
+    { .direct = { .level = 0, .preemph = 3 } },$

-:389: WARNING:LEADING_SPACE: please, no spaces at the start of a line
#389: FILE: drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c:1048:
+    { .direct = { .level = 1, .preemph = 0 } },$

-:390: WARNING:LEADING_SPACE: please, no spaces at the start of a line
#390: FILE: drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c:1049:
+    { .direct = { .level = 1, .preemph = 0 } },$

-:391: WARNING:LEADING_SPACE: please, no spaces at the start of a line
#391: FILE: drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c:1050:
+    { .direct = { .level = 1, .preemph = 2 } },$

-:392: WARNING:LEADING_SPACE: please, no spaces at the start of a line
#392: FILE: drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c:1051:
+    { .direct = { .level = 2, .preemph = 0 } },$

-:393: WARNING:LEADING_SPACE: please, no spaces at the start of a line
#393: FILE: drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c:1052:
+    { .direct = { .level = 2, .preemph = 1 } },$

-:394: WARNING:LEADING_SPACE: please, no spaces at the start of a line
#394: FILE: drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c:1053:
+    { .direct = { .level = 3, .preemph = 0 } },$

total: 0 errors, 12 warnings, 1 checks, 408 lines checked



^ permalink raw reply	[flat|nested] 17+ messages in thread

* [Intel-gfx] ✓ Fi.CI.BAT: success for drm/i915/mtl: Add C10 phy support
  2022-09-29 13:17 [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 phy support Mika Kahola
                   ` (5 preceding siblings ...)
  2022-09-29 19:46 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915/mtl: Add C10 phy support Patchwork
@ 2022-09-29 20:08 ` Patchwork
  2022-09-30 21:08 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
  7 siblings, 0 replies; 17+ messages in thread
From: Patchwork @ 2022-09-29 20:08 UTC (permalink / raw)
  To: Mika Kahola; +Cc: intel-gfx

[-- Attachment #1: Type: text/plain, Size: 5354 bytes --]

== Series Details ==

Series: drm/i915/mtl: Add C10 phy support
URL   : https://patchwork.freedesktop.org/series/109248/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_12199 -> Patchwork_109248v1
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/index.html

Participating hosts (49 -> 44)
------------------------------

  Missing    (5): fi-hsw-4200u bat-dg1-5 fi-ctg-p8600 fi-hsw-4770 fi-bdw-samus 

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_109248v1:

### IGT changes ###

#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@i915_selftest@live@slpc:
    - {bat-rpls-2}:       [DMESG-FAIL][1] ([i915#6367]) -> [DMESG-FAIL][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/bat-rpls-2/igt@i915_selftest@live@slpc.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/bat-rpls-2/igt@i915_selftest@live@slpc.html
    - {bat-adln-1}:       [PASS][3] -> [DMESG-FAIL][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/bat-adln-1/igt@i915_selftest@live@slpc.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/bat-adln-1/igt@i915_selftest@live@slpc.html

  
Known issues
------------

  Here are the changes found in Patchwork_109248v1 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_selftest@live@gem_contexts:
    - fi-icl-u2:          [PASS][5] -> [INCOMPLETE][6] ([i915#4890])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/fi-icl-u2/igt@i915_selftest@live@gem_contexts.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/fi-icl-u2/igt@i915_selftest@live@gem_contexts.html

  * igt@kms_chamelium@common-hpd-after-suspend:
    - fi-snb-2600:        NOTRUN -> [SKIP][7] ([fdo#109271] / [fdo#111827])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/fi-snb-2600/igt@kms_chamelium@common-hpd-after-suspend.html

  * igt@runner@aborted:
    - fi-icl-u2:          NOTRUN -> [FAIL][8] ([i915#4312])
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/fi-icl-u2/igt@runner@aborted.html

  
#### Possible fixes ####

  * igt@gem_exec_suspend@basic-s0@smem:
    - {bat-adlm-1}:       [DMESG-WARN][9] ([i915#2867]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/bat-adlm-1/igt@gem_exec_suspend@basic-s0@smem.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/bat-adlm-1/igt@gem_exec_suspend@basic-s0@smem.html

  * igt@i915_selftest@live@hangcheck:
    - fi-snb-2600:        [INCOMPLETE][11] ([i915#6992]) -> [PASS][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/fi-snb-2600/igt@i915_selftest@live@hangcheck.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/fi-snb-2600/igt@i915_selftest@live@hangcheck.html

  * igt@kms_cursor_legacy@basic-busy-flip-before-cursor@atomic-transitions-varying-size:
    - fi-bsw-kefka:       [FAIL][13] ([i915#6298]) -> [PASS][14]
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/fi-bsw-kefka/igt@kms_cursor_legacy@basic-busy-flip-before-cursor@atomic-transitions-varying-size.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/fi-bsw-kefka/igt@kms_cursor_legacy@basic-busy-flip-before-cursor@atomic-transitions-varying-size.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [i915#2867]: https://gitlab.freedesktop.org/drm/intel/issues/2867
  [i915#4258]: https://gitlab.freedesktop.org/drm/intel/issues/4258
  [i915#4312]: https://gitlab.freedesktop.org/drm/intel/issues/4312
  [i915#4890]: https://gitlab.freedesktop.org/drm/intel/issues/4890
  [i915#6298]: https://gitlab.freedesktop.org/drm/intel/issues/6298
  [i915#6367]: https://gitlab.freedesktop.org/drm/intel/issues/6367
  [i915#6471]: https://gitlab.freedesktop.org/drm/intel/issues/6471
  [i915#6818]: https://gitlab.freedesktop.org/drm/intel/issues/6818
  [i915#6992]: https://gitlab.freedesktop.org/drm/intel/issues/6992


Build changes
-------------

  * Linux: CI_DRM_12199 -> Patchwork_109248v1

  CI-20190529: 20190529
  CI_DRM_12199: 6fa6bc62d3b91e5a70b8e4869436a0b03083abf5 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_6669: 3d2df081c14c251e0269e3510ddc4e9d26ffe925 @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
  Patchwork_109248v1: 6fa6bc62d3b91e5a70b8e4869436a0b03083abf5 @ git://anongit.freedesktop.org/gfx-ci/linux


### Linux commits

4d575208d033 drm/i915/mtl: Add vswing programming for C10 phys
4873a207ebd6 drm/i915/mtl: Add C10 phy programming for HDMI
925cd699312e drm/i915/mtl: Add support for C10 phy programming
233dc42b2d09 drm/i915/mtl: Add PLL programming support for C10 phy
cee922f59024 drm/i915/mtl: Add Support for C10, C20 PHY Message Bus

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/index.html

[-- Attachment #2: Type: text/html, Size: 5985 bytes --]

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus Mika Kahola
@ 2022-09-30  9:04   ` Jani Nikula
  2022-10-06 10:04     ` Kahola, Mika
  2022-10-11  0:00   ` Lucas De Marchi
  1 sibling, 1 reply; 17+ messages in thread
From: Jani Nikula @ 2022-09-30  9:04 UTC (permalink / raw)
  To: Mika Kahola, intel-gfx

On Thu, 29 Sep 2022, Mika Kahola <mika.kahola@intel.com> wrote:
> From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
>
> XELPDP has C10 and C20 phys from Synopsys to drive displays. Each phy
> has a dedicated PIPE 5.2 Message bus for configuration. This message
> bus is used to configure the phy internal registers.

This looks like a silly intermediate step, adding a bunch of static
functions with __maybe_unused, just to be modified again in the next
patch.

>
> Bspec: 64599, 65100, 65101, 67610, 67636
>
> Cc: Mika Kahola <mika.kahola@intel.com>
> Cc: Imre Deak <imre.deak@intel.com>
> Cc: Uma Shankar <uma.shankar@intel.com>
> Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
> Signed-off-by: Mika Kahola <mika.kahola@intel.com> (v4)
> ---
>  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 179 +++++++++++++++++++
>  1 file changed, 179 insertions(+)
>  create mode 100644 drivers/gpu/drm/i915/display/intel_cx0_phy.c
>
> diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> new file mode 100644
> index 000000000000..7930b0255cfa
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> @@ -0,0 +1,179 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2021 Intel Corporation
> + */
> +
> +#include "intel_de.h"
> +#include "intel_uncore.h"

Do you use anything from intel_uncore.h directly, or is it just
intel_de.h?

> +
> +static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
> +{
> +	enum phy phy = intel_port_to_phy(i915, port);
> +
> +	/* Bring the phy to idle. */
> +	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> +		       XELPDP_PORT_M2P_TRANSACTION_RESET);
> +
> +	/* Wait for Idle Clear. */
> +	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> +				    XELPDP_PORT_M2P_TRANSACTION_RESET,
> +				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
> +		drm_err_once(&i915->drm, "Failed to bring PHY %c to idle. \n", phy_name(phy));
> +		return;
> +	}
> +
> +	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);
> +	return;

Unnecessary return statement.

> +}
> +
> +__maybe_unused static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
> +			 int lane, u16 addr)
> +{
> +	enum phy phy = intel_port_to_phy(i915, port);
> +	u32 val = 0;
> +	int attempts = 0;
> +
> +retry:
> +	if (attempts == 3) {
> +		drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries. Status: 0x%x\n", phy_name(phy), addr, attempts, val ?: 0);
> +		return 0;
> +	}

The code looks like it would benefit from abstracting a non-retrying
read function that returns errors, with this function doing the retry
loop using a conventional for loop.

There are four copy-pasted bits of error handling here, which is just
error prone.
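
Just as a rough sketch of what I mean -- __intel_cx0_read_once() here is
a made-up name for the single-attempt version, i.e. the body of this
function minus the retry handling, returning the data byte on success or
a negative error code:

static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
			 int lane, u16 addr)
{
	enum phy phy = intel_port_to_phy(i915, port);
	int i, status;

	for (i = 0; i < 3; i++) {
		status = __intel_cx0_read_once(i915, port, lane, addr);
		if (status >= 0)
			return status;

		/* reset the bus and try again */
		intel_cx0_bus_reset(i915, port, lane);
	}

	drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries\n",
		     phy_name(phy), addr, i);

	return 0;
}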

> +
> +	/* Wait for pending transactions.*/
> +	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> +				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
> +				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
> +		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));

drm_dbg_kms() throughout.

> +		attempts++;
> +		intel_cx0_bus_reset(i915, port, lane);
> +		goto retry;
> +	}
> +
> +	/* Issue the read command. */
> +	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> +		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
> +		       XELPDP_PORT_M2P_COMMAND_READ |
> +		       XELPDP_PORT_M2P_ADDRESS(addr));
> +
> +	/* Wait for response ready. And read response.*/
> +	if (__intel_wait_for_register(&i915->uncore,
> +				      XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
> +				      XELPDP_PORT_P2M_RESPONSE_READY,
> +				      XELPDP_PORT_P2M_RESPONSE_READY,
> +				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
> +				      XELPDP_MSGBUS_TIMEOUT_SLOW, &val)) {
> +		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Read response ACK. Status: 0x%x\n", phy_name(phy), val);
> +		attempts++;
> +		intel_cx0_bus_reset(i915, port, lane);
> +		goto retry;
> +	}
> +
> +	/* Check for error. */
> +	if (val & XELPDP_PORT_P2M_ERROR_SET) {
> +		drm_dbg(&i915->drm, "PHY %c Error occurred during read command. Status: 0x%x\n", phy_name(phy), val);
> +		attempts++;
> +		intel_cx0_bus_reset(i915, port, lane);
> +		goto retry;
> +	}
> +
> +	/* Check for Read Ack. */
> +	if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val) !=
> +	    XELPDP_PORT_P2M_COMMAND_READ_ACK) {
> +		drm_dbg(&i915->drm, "PHY %c Not a Read response. MSGBUS Status: 0x%x.\n", phy_name(phy), val);
> +		attempts++;
> +		intel_cx0_bus_reset(i915, port, lane);
> +		goto retry;
> +	}
> +
> +	/* Clear Response Ready flag.*/
> +	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);

Blank line before return.

> +	return (u8)REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);

Unnecessary cast.

> +}
> +
> +static int intel_cx0_wait_cwrite_ack(struct drm_i915_private *i915,
> +				      enum port port, int lane)
> +{
> +	enum phy phy = intel_port_to_phy(i915, port);
> +	u32 val;
> +
> +	/* Check for write ack. */
> +	if (__intel_wait_for_register(&i915->uncore,
> +				      XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
> +				      XELPDP_PORT_P2M_RESPONSE_READY,
> +				      XELPDP_PORT_P2M_RESPONSE_READY,
> +				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
> +				      XELPDP_MSGBUS_TIMEOUT_SLOW, &val)) {
> +		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Committed message ACK. Status: 0x%x\n", phy_name(phy), val);
> +		return -ETIMEDOUT;
> +	}
> +
> +	if ((REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val) !=
> +	     XELPDP_PORT_P2M_COMMAND_WRITE_ACK) || val & XELPDP_PORT_P2M_ERROR_SET) {
> +		drm_dbg(&i915->drm, "PHY %c Unexpected ACK received. MSGBUS STATUS: 0x%x.\n", phy_name(phy), val);
> +		return -EINVAL;
> +	}

This is also copy-paste duplicating the stuff in the previous
function. So why isn't this function used there?

> +
> +	return 0;
> +}
> +
> +__maybe_unused static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
> +			    int lane, u16 addr, u8 data, bool committed)
> +{
> +	enum phy phy = intel_port_to_phy(i915, port);
> +	int attempts = 0;
> +
> +retry:
> +	if (attempts == 3) {
> +		drm_err_once(&i915->drm, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, attempts);
> +		return;
> +	}

Same here with the retries as in the read. Have a lower level
non-retrying write function, and handle the retries at a different
abstraction level.

> +
> +	/* Wait for pending transactions.*/
> +	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> +				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
> +				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
> +		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
> +		attempts++;
> +		intel_cx0_bus_reset(i915, port, lane);
> +		goto retry;
> +	}
> +
> +	/* Issue the write command. */
> +	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> +		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
> +		       (committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
> +		       XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
> +		       XELPDP_PORT_M2P_DATA(data) |
> +		       XELPDP_PORT_M2P_ADDRESS(addr));
> +
> +	/* Check for error. */
> +	if (committed) {
> +		if (intel_cx0_wait_cwrite_ack(i915, port, lane) < 0) {
> +			attempts++;
> +			intel_cx0_bus_reset(i915, port, lane);
> +			goto retry;
> +		}
> +	} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(phy, lane)) &
> +			    XELPDP_PORT_P2M_ERROR_SET)) {
> +		drm_dbg(&i915->drm, "PHY %c Error occurred during write command.\n", phy_name(phy));
> +		attempts++;
> +		intel_cx0_bus_reset(i915, port, lane);
> +		goto retry;
> +	}
> +
> +	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);
> +
> +	return;

Unnecessary return statement.

> +}
> +
> +__maybe_unused static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
> +			  int lane, u16 addr, u8 clear, u8 set, bool committed)
> +{
> +	u8 old, val;
> +
> +	old = intel_cx0_read(i915, port, lane, addr);
> +	val = (old & ~clear) | set;
> +
> +	if (val != old)
> +		intel_cx0_write(i915, port, lane, addr, val, committed);
> +}

-- 
Jani Nikula, Intel Open Source Graphics Center

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 2/5] drm/i915/mtl: Add PLL programming support for C10 phy
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 2/5] drm/i915/mtl: Add PLL programming support for C10 phy Mika Kahola
@ 2022-09-30  9:19   ` Jani Nikula
  0 siblings, 0 replies; 17+ messages in thread
From: Jani Nikula @ 2022-09-30  9:19 UTC (permalink / raw)
  To: Mika Kahola, intel-gfx

On Thu, 29 Sep 2022, Mika Kahola <mika.kahola@intel.com> wrote:
> From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
>
> XELPDP has C10 phys to drive output to the EDP and the native output
> from the display engine. Add structures, programming hardware state
> readout logic. Port clock calculations are similar to DG2. Use the DG2
> formulae to calculate the port clock but use the relevant pll signals.
> Note: PHY lane 0 is always used for PLL programming.
>
> Bspec: 64568, 64539, 67636
>
> Cc: Mika Kahola <mika.kahola@intel.com>
> Cc: Imre Deak <imre.deak@intel.com>
> Cc: Uma Shankar <uma.shankar@intel.com>
> Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
> ---
>  drivers/gpu/drm/i915/display/intel_cx0_phy.c  | 516 +++++++++++++++++-
>  drivers/gpu/drm/i915/display/intel_cx0_phy.h  | 128 +++++
>  drivers/gpu/drm/i915/display/intel_ddi.c      |  20 +-
>  drivers/gpu/drm/i915/display/intel_display.c  |   1 +
>  .../drm/i915/display/intel_display_power.c    |   3 +-
>  .../i915/display/intel_display_power_well.c   |   2 +-
>  .../drm/i915/display/intel_display_types.h    |   6 +
>  drivers/gpu/drm/i915/display/intel_dpll.c     |  20 +-
>  drivers/gpu/drm/i915/display/intel_dpll_mgr.c |   2 +-
>  .../drm/i915/display/intel_modeset_verify.c   |   2 +
>  10 files changed, 690 insertions(+), 10 deletions(-)
>  create mode 100644 drivers/gpu/drm/i915/display/intel_cx0_phy.h
>
> diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> index 7930b0255cfa..2f401116d1d0 100644
> --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> @@ -3,7 +3,11 @@
>   * Copyright © 2021 Intel Corporation
>   */
>  
> +#include "intel_cx0_phy.h"
>  #include "intel_de.h"
> +#include "intel_display_types.h"
> +#include "intel_dp.h"
> +#include "intel_panel.h"
>  #include "intel_uncore.h"
>  
>  static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
> @@ -26,7 +30,7 @@ static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, i
>  	return;
>  }
>  
> -__maybe_unused static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
> +static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
>  			 int lane, u16 addr)

Better just squash patch 1 here I think to avoid this.

>  {
>  	enum phy phy = intel_port_to_phy(i915, port);
> @@ -116,8 +120,8 @@ static int intel_cx0_wait_cwrite_ack(struct drm_i915_private *i915,
>  	return 0;
>  }
>  
> -__maybe_unused static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
> -			    int lane, u16 addr, u8 data, bool committed)
> +static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
> +			      int lane, u16 addr, u8 data, bool committed)
>  {
>  	enum phy phy = intel_port_to_phy(i915, port);
>  	int attempts = 0;
> @@ -166,8 +170,19 @@ __maybe_unused static void intel_cx0_write(struct drm_i915_private *i915, enum p
>  	return;
>  }
>  
> -__maybe_unused static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
> -			  int lane, u16 addr, u8 clear, u8 set, bool committed)
> +static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
> +			    int lane, u16 addr, u8 data, bool committed)
> +{
> +	if (lane == INTEL_CX0_BOTH_LANES) {
> +		__intel_cx0_write(i915, port, INTEL_CX0_LANE0, addr, data, committed);
> +		__intel_cx0_write(i915, port, INTEL_CX0_LANE1, addr, data, committed);
> +	} else {
> +		__intel_cx0_write(i915, port, lane, addr, data, committed);
> +	}
> +}

Usually this kind of stuff is handled with a bitmask describing the
lanes, and having a for loop over the bits.

The enum does not seem very well thought out. Even the naming becomes
cumbersome, with "enum intel_cx0_lanes lane" confusing plural and
singular. And the function at hand takes an int parameter instead, in
the singular.
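
Roughly something like this (names just illustrative), which also makes
the BOTH_LANES case fall out naturally:

#define INTEL_CX0_LANE0		BIT(0)
#define INTEL_CX0_LANE1		BIT(1)
#define INTEL_CX0_BOTH_LANES	(INTEL_CX0_LANE0 | INTEL_CX0_LANE1)

static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
			    u8 lane_mask, u16 addr, u8 data, bool committed)
{
	unsigned long mask = lane_mask;
	int lane;

	/* one low level write per lane set in the mask */
	for_each_set_bit(lane, &mask, 2)
		__intel_cx0_write(i915, port, lane, addr, data, committed);
}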

> +
> +static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
> +			    int lane, u16 addr, u8 clear, u8 set, bool committed)
>  {
>  	u8 old, val;
>  
> @@ -177,3 +192,494 @@ __maybe_unused static void intel_cx0_rmw(struct drm_i915_private *i915, enum por
>  	if (val != old)
>  		intel_cx0_write(i915, port, lane, addr, val, committed);
>  }
> +
> +static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
> +			  int lane, u16 addr, u8 clear, u8 set, bool committed)
> +{
> +	if (lane == INTEL_CX0_BOTH_LANES) {
> +		__intel_cx0_rmw(i915, port, INTEL_CX0_LANE0, addr, clear, set, committed);
> +		__intel_cx0_rmw(i915, port, INTEL_CX0_LANE1, addr, clear, set, committed);
> +	} else {
> +		__intel_cx0_rmw(i915, port, lane, addr, clear, set, committed);
> +	}
> +}

Ditto.

> +
> +/*
> + * Basic DP link rates with 38.4 MHz reference clock.
> + * Note: The tables below are with SSC. In non-ssc
> + * registers 0xC04 to 0xC08(pll[4] to pll[8]) will be
> + * programmed 0.
> + */
> +
> +static const struct intel_c10mpllb_state mtl_c10_dp_rbr = {
> +	.clock = 162000,
> +	.pll[0] = 0xB4,
> +	.pll[1] = 0,
> +	.pll[2] = 0x30,
> +	.pll[3] = 0x1,
> +	.pll[4] = 0x26,
> +	.pll[5] = 0x0C,
> +	.pll[6] = 0x98,
> +	.pll[7] = 0x46,
> +	.pll[8] = 0x1,
> +	.pll[9] = 0x1,
> +	.pll[10] = 0,
> +	.pll[11] = 0,
> +	.pll[12] = 0xC0,
> +	.pll[13] = 0,
> +	.pll[14] = 0,
> +	.pll[15] = 0x2,
> +	.pll[16] = 0x84,
> +	.pll[17] = 0x4F,
> +	.pll[18] = 0xE5,
> +	.pll[19] = 0x23,
> +};
> +
> +static const struct intel_c10mpllb_state mtl_c10_edp_r216 = {
> +	.clock = 216000,
> +	.pll[0] = 0x4,
> +	.pll[1] = 0,
> +	.pll[2] = 0xA2,
> +	.pll[3] = 0x1,
> +	.pll[4] = 0x33,
> +	.pll[5] = 0x10,
> +	.pll[6] = 0x75,
> +	.pll[7] = 0xB3,
> +	.pll[8] = 0x1,
> +	.pll[9] = 0x1,
> +	.pll[10] = 0,
> +	.pll[11] = 0,
> +	.pll[12] = 0,
> +	.pll[13] = 0,
> +	.pll[14] = 0,
> +	.pll[15] = 0x2,
> +	.pll[16] = 0x85,
> +	.pll[17] = 0x0F,
> +	.pll[18] = 0xE6,
> +	.pll[19] = 0x23,
> +};
> +
> +static const struct intel_c10mpllb_state mtl_c10_edp_r243 = {
> +	.clock = 243000,
> +	.pll[0] = 0x34,
> +	.pll[1] = 0,
> +	.pll[2] = 0xDA,
> +	.pll[3] = 0x1,
> +	.pll[4] = 0x39,
> +	.pll[5] = 0x12,
> +	.pll[6] = 0xE3,
> +	.pll[7] = 0xE9,
> +	.pll[8] = 0x1,
> +	.pll[9] = 0x1,
> +	.pll[10] = 0,
> +	.pll[11] = 0,
> +	.pll[12] = 0x20,
> +	.pll[13] = 0,
> +	.pll[14] = 0,
> +	.pll[15] = 0x2,
> +	.pll[16] = 0x85,
> +	.pll[17] = 0x8F,
> +	.pll[18] = 0xE6,
> +	.pll[19] = 0x23,
> +};
> +
> +static const struct intel_c10mpllb_state mtl_c10_dp_hbr1 = {
> +	.clock = 270000,
> +	.pll[0] = 0xF4,
> +	.pll[1] = 0,
> +	.pll[2] = 0xF8,
> +	.pll[3] = 0x0,
> +	.pll[4] = 0x20,
> +	.pll[5] = 0x0A,
> +	.pll[6] = 0x29,
> +	.pll[7] = 0x10,
> +	.pll[8] = 0x1,   /* Verify */
> +	.pll[9] = 0x1,
> +	.pll[10] = 0,
> +	.pll[11] = 0,
> +	.pll[12] = 0xA0,
> +	.pll[13] = 0,
> +	.pll[14] = 0,
> +	.pll[15] = 0x1,
> +	.pll[16] = 0x84,
> +	.pll[17] = 0x4F,
> +	.pll[18] = 0xE5,
> +	.pll[19] = 0x23,
> +};
> +
> +static const struct intel_c10mpllb_state mtl_c10_edp_r324 = {
> +	.clock = 324000,
> +	.pll[0] = 0xB4,
> +	.pll[1] = 0,
> +	.pll[2] = 0x30,
> +	.pll[3] = 0x1,
> +	.pll[4] = 0x26,
> +	.pll[5] = 0x0C,
> +	.pll[6] = 0x98,
> +	.pll[7] = 0x46,
> +	.pll[8] = 0x1,
> +	.pll[9] = 0x1,
> +	.pll[10] = 0,
> +	.pll[11] = 0,
> +	.pll[12] = 0xC0,
> +	.pll[13] = 0,
> +	.pll[14] = 0,
> +	.pll[15] = 0x1,
> +	.pll[16] = 0x85,
> +	.pll[17] = 0x4F,
> +	.pll[18] = 0xE6,
> +	.pll[19] = 0x23,
> +};
> +
> +static const struct intel_c10mpllb_state mtl_c10_edp_r432 = {
> +	.clock = 432000,
> +	.pll[0] = 0x4,
> +	.pll[1] = 0,
> +	.pll[2] = 0xA2,
> +	.pll[3] = 0x1,
> +	.pll[4] = 0x33,
> +	.pll[5] = 0x10,
> +	.pll[6] = 0x75,
> +	.pll[7] = 0xB3,
> +	.pll[8] = 0x1,
> +	.pll[9] = 0x1,
> +	.pll[10] = 0,
> +	.pll[11] = 0,
> +	.pll[12] = 0,
> +	.pll[13] = 0,
> +	.pll[14] = 0,
> +	.pll[15] = 0x1,
> +	.pll[16] = 0x85,
> +	.pll[17] = 0x0F,
> +	.pll[18] = 0xE6,
> +	.pll[19] = 0x23,
> +};
> +
> +static const struct intel_c10mpllb_state mtl_c10_dp_hbr2 = {
> +	.clock = 540000,
> +	.pll[0] = 0xF4,
> +	.pll[1] = 0,
> +	.pll[2] = 0xF8,
> +	.pll[3] = 0,
> +	.pll[4] = 0x20,
> +	.pll[5] = 0x0A,
> +	.pll[6] = 0x29,
> +	.pll[7] = 0x10,
> +	.pll[8] = 0x1,
> +	.pll[9] = 0x1,
> +	.pll[10] = 0,
> +	.pll[11] = 0,
> +	.pll[12] = 0xA0,
> +	.pll[13] = 0,
> +	.pll[14] = 0,
> +	.pll[15] = 0,
> +	.pll[16] = 0x84,
> +	.pll[17] = 0x4F,
> +	.pll[18] = 0xE5,
> +	.pll[19] = 0x23,
> +};
> +
> +static const struct intel_c10mpllb_state mtl_c10_edp_r675 = {
> +	.clock = 675000,
> +	.pll[0] = 0xB4,
> +	.pll[1] = 0,
> +	.pll[2] = 0x3E,
> +	.pll[3] = 0x1,
> +	.pll[4] = 0xA8,
> +	.pll[5] = 0x0C,
> +	.pll[6] = 0x33,
> +	.pll[7] = 0x54,
> +	.pll[8] = 0x1,
> +	.pll[9] = 0x1,
> +	.pll[10] = 0,
> +	.pll[11] = 0,
> +	.pll[12] = 0xC8,
> +	.pll[13] = 0,
> +	.pll[14] = 0,
> +	.pll[15] = 0,
> +	.pll[16] = 0x85,
> +	.pll[17] = 0x8F,
> +	.pll[18] = 0xE6,
> +	.pll[19] = 0x23,
> +};
> +
> +static const struct intel_c10mpllb_state mtl_c10_dp_hbr3 = {
> +	.clock = 810000,
> +	.pll[0] = 0x34,
> +	.pll[1] = 0,
> +	.pll[2] = 0x84,
> +	.pll[3] = 0x1,
> +	.pll[4] = 0x30,
> +	.pll[5] = 0x0F,
> +	.pll[6] = 0x3D,
> +	.pll[7] = 0x98,
> +	.pll[8] = 0x1,
> +	.pll[9] = 0x1,
> +	.pll[10] = 0,
> +	.pll[11] = 0,
> +	.pll[12] = 0xF0,
> +	.pll[13] = 0,
> +	.pll[14] = 0,
> +	.pll[15] = 0,
> +	.pll[16] = 0x84,
> +	.pll[17] = 0x0F,
> +	.pll[18] = 0xE5,
> +	.pll[19] = 0x23,
> +};
> +
> +static const struct intel_c10mpllb_state * const mtl_c10_dp_tables[] = {
> +	&mtl_c10_dp_rbr,
> +	&mtl_c10_dp_hbr1,
> +	&mtl_c10_dp_hbr2,
> +	&mtl_c10_dp_hbr3,
> +	NULL,
> +};
> +
> +static const struct intel_c10mpllb_state * const mtl_c10_edp_tables[] = {
> +	&mtl_c10_dp_rbr,
> +	&mtl_c10_edp_r216,
> +	&mtl_c10_edp_r243,
> +	&mtl_c10_dp_hbr1,
> +	&mtl_c10_edp_r324,
> +	&mtl_c10_edp_r432,
> +	&mtl_c10_dp_hbr2,
> +	&mtl_c10_edp_r675,
> +	&mtl_c10_dp_hbr3,
> +	NULL,
> +};
> +
> +static const struct intel_c10mpllb_state * const *
> +intel_c10_mpllb_tables_get(struct intel_crtc_state *crtc_state,
> +			   struct intel_encoder *encoder)
> +{
> +	if (intel_crtc_has_dp_encoder(crtc_state)) {
> +		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
> +			return mtl_c10_edp_tables;
> +		else
> +			return mtl_c10_dp_tables;
> +	}
> +
> +	/* TODO: Add HDMI Support */
> +	MISSING_CASE(encoder->type);
> +	return NULL;
> +}
> +
> +static int intel_c10mpllb_calc_state(struct intel_crtc_state *crtc_state,
> +				     struct intel_encoder *encoder)
> +{
> +	const struct intel_c10mpllb_state * const *tables;
> +	int i;
> +
> +	tables = intel_c10_mpllb_tables_get(crtc_state, encoder);
> +	if (!tables)
> +		return -EINVAL;
> +
> +	for (i = 0; tables[i]; i++) {
> +		if (crtc_state->port_clock <= tables[i]->clock) {
> +			crtc_state->c10mpllb_state = *tables[i];
> +			return 0;
> +		}
> +	}
> +
> +	return -EINVAL;
> +}
> +
> +int intel_cx0mpllb_calc_state(struct intel_crtc_state *crtc_state,
> +			      struct intel_encoder *encoder)
> +{
> +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> +	enum phy phy = intel_port_to_phy(i915, encoder->port);
> +
> +	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
> +
> +	return intel_c10mpllb_calc_state(crtc_state, encoder);
> +}
> +
> +void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
> +				     struct intel_c10mpllb_state *pll_state)
> +{
> +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> +	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
> +	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
> +	enum intel_cx0_lanes lane = lane_reversal ? INTEL_CX0_LANE1 :
> +				    INTEL_CX0_LANE0;
> +	enum phy phy = intel_port_to_phy(i915, encoder->port);
> +	int i;
> +	u8 cmn, tx0;
> +
> +	/*
> +	 * According to C10 VDR Register programming Sequence we need
> +	 * to do this to read PHY internal registers from MsgBus.
> +	 */
> +	intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1), 0,
> +		      C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
> +
> +	for (i = 0; i < 20; i++)

ARRAY_SIZE()
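
i.e. presumably:

	for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)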

> +		pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane,
> +						   PHY_C10_VDR_PLL(i));
> +
> +	cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0));
> +	tx0 = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0));
> +
> +	if (tx0 != C10_TX0_VAL || cmn != C10_CMN0_DP_VAL)
> +		drm_warn(&i915->drm, "Unexpected tx: %x or cmn: %x for phy: %c.\n",
> +			 tx0, cmn, phy_name(phy));
> +}
> +
> +__maybe_unused static void intel_c10_pll_program(struct drm_i915_private *i915,
> +						 const struct intel_crtc_state *crtc_state,
> +						 struct intel_encoder *encoder)
> +{
> +	const struct intel_c10mpllb_state *pll_state = &crtc_state->c10mpllb_state;
> +	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
> +	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
> +	enum intel_cx0_lanes master_lane = lane_reversal ? INTEL_CX0_LANE1 :
> +				 INTEL_CX0_LANE0;
> +	enum intel_cx0_lanes follower_lane = lane_reversal ? INTEL_CX0_LANE0 :
> +				 INTEL_CX0_LANE1;
> +
> +	int i;
> +	struct intel_dp *intel_dp;
> +	bool use_ssc = false;
> +	u8 cmn0 = 0;
> +
> +	if (intel_crtc_has_dp_encoder(crtc_state)) {
> +		intel_dp = enc_to_intel_dp(encoder);
> +		use_ssc = (intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
> +			  DP_MAX_DOWNSPREAD_0_5);
> +
> +		if (intel_dp_is_edp(intel_dp) && !intel_panel_use_ssc(i915))
> +			use_ssc = false;
> +
> +		cmn0 = C10_CMN0_DP_VAL;
> +	}
> +
> +	intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
> +		        C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
> +	/* Custom width needs to be programmed to 0 for both the phy lanes */
> +	intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES,
> +		      PHY_C10_VDR_CUSTOM_WIDTH, 0x3, 0, MB_WRITE_COMMITTED);
> +	intel_cx0_rmw(i915, encoder->port, follower_lane, PHY_C10_VDR_CONTROL(1),
> +		      C10_VDR_CTRL_MASTER_LANE, C10_VDR_CTRL_UPDATE_CFG,
> +		      MB_WRITE_COMMITTED);
> +
> +	/* Program the pll values only for the master lane */
> +	for (i = 0; i < 20; i++)
> +		/* If not using ssc pll[4] through pll[8] must be 0*/
> +		intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_PLL(i),
> +				(!use_ssc && (i > 3 && i < 9)) ? 0 : pll_state->pll[i],
> +				(i % 4) ? MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED);

This might benefit from adding intermediate variables. See state_verify below.
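
E.g. something along these lines, keeping the same logic:

	for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++) {
		/* If not using ssc, pll[4] through pll[8] must be 0 */
		u8 val = (!use_ssc && i > 3 && i < 9) ? 0 : pll_state->pll[i];
		bool committed = (i % 4) ? MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED;

		intel_cx0_write(i915, encoder->port, master_lane,
				PHY_C10_VDR_PLL(i), val, committed);
	}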

> +
> +	intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_CMN(0), cmn0, MB_WRITE_COMMITTED);
> +	intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_TX(0), C10_TX0_VAL, MB_WRITE_COMMITTED);
> +	intel_cx0_rmw(i915, encoder->port, master_lane, PHY_C10_VDR_CONTROL(1),
> +		      C10_VDR_CTRL_MSGBUS_ACCESS, C10_VDR_CTRL_MASTER_LANE |
> +		      C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
> +}
> +
> +void intel_c10mpllb_dump_hw_state(struct drm_i915_private *dev_priv,
> +				  const struct intel_c10mpllb_state *hw_state)
> +{
> +	bool fracen;
> +	int i;
> +	unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
> +	unsigned int multiplier, tx_clk_div;
> +
> +	fracen = hw_state->pll[0] & C10_PLL0_FRACEN;
> +	drm_dbg_kms(&dev_priv->drm, "c10pll_hw_state: fracen: %s, ",
> +		    str_yes_no(fracen));
> +
> +	if (fracen) {
> +		frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11];
> +		frac_rem =  hw_state->pll[14] << 8 | hw_state->pll[13];
> +		frac_den =  hw_state->pll[10] << 8 | hw_state->pll[9];
> +		drm_dbg_kms(&dev_priv->drm, "quot: %u, rem: %u, den: %u,\n",
> +			    frac_quot, frac_rem, frac_den);
> +	}
> +
> +	multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, hw_state->pll[3]) << 8 |
> +		      hw_state->pll[2]) / 2 + 16;
> +	tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, hw_state->pll[15]);
> +	drm_dbg_kms(&dev_priv->drm,
> +		    "multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div);
> +
> +	drm_dbg_kms(&dev_priv->drm, "c10pll_rawhw_state:");
> +
> +	for (i = 0; i < 20; i = i + 4)

Lots of duplication of magic 20 here.

> +		drm_dbg_kms(&dev_priv->drm, "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n",
> +			    i, hw_state->pll[i], i + 1, hw_state->pll[i+ 1],
> +			    i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]);
> +}
> +
> +int intel_c10mpllb_calc_port_clock(struct intel_encoder *encoder,
> +				   const struct intel_c10mpllb_state *pll_state)
> +{
> +	unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
> +	unsigned int multiplier, tx_clk_div, refclk = 38400;
> +
> +	if (pll_state->pll[0] & C10_PLL0_FRACEN) {
> +		frac_quot = pll_state->pll[12] << 8 | pll_state->pll[11];
> +		frac_rem =  pll_state->pll[14] << 8 | pll_state->pll[13];
> +		frac_den =  pll_state->pll[10] << 8 | pll_state->pll[9];
> +	}
> +
> +	multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, pll_state->pll[3]) << 8 |
> +		      pll_state->pll[2]) / 2 + 16;
> +
> +	tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, pll_state->pll[15]);
> +
> +	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
> +				     DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den),
> +				     10 << (tx_clk_div + 16));
> +}
> +
> +void intel_c10mpllb_state_verify(struct intel_atomic_state *state,
> +				 struct intel_crtc_state *new_crtc_state)
> +{
> +	struct drm_i915_private *i915 = to_i915(state->base.dev);
> +	struct intel_c10mpllb_state mpllb_hw_state = { 0 };
> +	struct intel_c10mpllb_state *mpllb_sw_state = &new_crtc_state->c10mpllb_state;
> +	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
> +	struct intel_encoder *encoder;
> +	struct intel_dp *intel_dp;
> +	enum phy phy;
> +	int i;
> +	bool use_ssc = false;
> +
> +	if (DISPLAY_VER(i915) < 14)
> +		return;
> +
> +	if (!new_crtc_state->hw.active)
> +		return;
> +
> +	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
> +	phy = intel_port_to_phy(i915, encoder->port);
> +
> +	if (intel_crtc_has_dp_encoder(new_crtc_state)) {
> +		intel_dp = enc_to_intel_dp(encoder);
> +		use_ssc = (intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
> +			  DP_MAX_DOWNSPREAD_0_5);
> +
> +		if (intel_dp_is_edp(intel_dp) && !intel_panel_use_ssc(i915))
> +			use_ssc = false;
> +	}
> +
> +	if (!intel_is_c10phy(i915, phy))
> +		return;
> +
> +	intel_c10mpllb_readout_hw_state(encoder, &mpllb_hw_state);
> +
> +	for (i = 0; i < 20; i++) {
> +		u8 expected;
> +
> +		if (!use_ssc && i > 3 && i < 9)
> +			expected = 0;
> +		else
> +			expected = mpllb_sw_state->pll[i];
> +
> +		I915_STATE_WARN(mpllb_hw_state.pll[i] != expected,
> +				"[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
> +				crtc->base.base.id, crtc->base.name,
> +				i, expected, mpllb_hw_state.pll[i]);
> +	}
> +}
> diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
> new file mode 100644
> index 000000000000..cf1f300b6a7b
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
> @@ -0,0 +1,128 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2021 Intel Corporation
> + */
> +
> +#ifndef __INTEL_CX0_PHY_H__
> +#define __INTEL_CX0_PHY_H__
> +
> +#include <linux/types.h>
> +#include <linux/bitfield.h>
> +#include <linux/bits.h>
> +
> +#include "i915_drv.h"
> +#include "intel_display_types.h"

Please use forward declarations instead of including these headers.

> +
> +/**
> + * REG_BIT8() - Prepare a u8 bit value
> + * @__n: 0-based bit number
> + *
> + * Local wrapper for BIT() to force u8, with compile time checks.
> + *
> + * @return: Value with bit @__n set.
> + */
> +#define REG_BIT8(__n)							\
> +	((u8)(BIT(__n) +						\
> +	       BUILD_BUG_ON_ZERO(__is_constexpr(__n) &&		\
> +				 ((__n) < 0 || (__n) > 7))))
> +
> +/**
> + * REG_GENMASK8() - Prepare a continuous u8 bitmask
> + * @__high: 0-based high bit
> + * @__low: 0-based low bit
> + *
> + * Local wrapper for GENMASK() to force u8, with compile time checks.
> + *
> + * @return: Continuous bitmask from @__high to @__low, inclusive.
> + */
> +#define REG_GENMASK8(__high, __low)					\
> +	((u8)(GENMASK(__high, __low) +					\
> +	       BUILD_BUG_ON_ZERO(__is_constexpr(__high) &&	\
> +				 __is_constexpr(__low) &&		\
> +				 ((__low) < 0 || (__high) > 7 || (__low) > (__high)))))
> +
> +/*
> + * Local integer constant expression version of is_power_of_2().
> + */
> +#define IS_POWER_OF_2(__x)		((__x) && (((__x) & ((__x) - 1)) == 0))
> +
> +/**
> + * REG_FIELD_PREP8() - Prepare a u8 bitfield value
> + * @__mask: shifted mask defining the field's length and position
> + * @__val: value to put in the field
> + *
> + * Local copy of FIELD_PREP8() to generate an integer constant expression, force
> + * u8 and for consistency with REG_FIELD_GET8(), REG_BIT8() and REG_GENMASK8().
> + *
> + * @return: @__val masked and shifted into the field defined by @__mask.
> + */
> +#define REG_FIELD_PREP8(__mask, __val)						\
> +	((u8)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) +	\
> +	       BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) +		\
> +	       BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U8_MAX) +		\
> +	       BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
> +	       BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
> +
> +/**
> + * REG_FIELD_GET8() - Extract a u8 bitfield value
> + * @__mask: shifted mask defining the field's length and position
> + * @__val: value to extract the bitfield value from
> + *
> + * Local wrapper for FIELD_GET() to force u8 and for consistency with
> + * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
> + *
> + * @return: Masked and shifted value of the field defined by @__mask in @__val.
> + */
> +#define REG_FIELD_GET8(__mask, __val)	((u8)FIELD_GET(__mask, __val))

So that there's no confusion here: NAK on adding any of the above to a
phy specific header. They have no place here. i915_reg_defs.h is the
place.

> +
> +struct drm_i915_private;
> +struct intel_encoder;
> +struct intel_crtc_state;
> +enum phy;
> +
> +enum intel_cx0_lanes {
> +	INTEL_CX0_LANE0,
> +	INTEL_CX0_LANE1,
> +	INTEL_CX0_BOTH_LANES,
> +};

Now that's ugly. A bitmask on the lanes is probably the way to go.

> +
> +#define MB_WRITE_COMMITTED		1
> +#define MB_WRITE_UNCOMMITTED		0
> +
> +/* C10 Vendor Registers */
> +#define PHY_C10_VDR_PLL(idx)		(0xC00 + (idx))
> +#define  C10_PLL0_FRACEN		REG_BIT8(4)
> +#define  C10_PLL3_MULTIPLIERH_MASK	REG_GENMASK8(3, 0)
> +#define  C10_PLL15_TXCLKDIV_MASK	REG_GENMASK8(2, 0)
> +#define PHY_C10_VDR_CMN(idx)		(0xC20 + (idx))
> +#define  C10_CMN0_DP_VAL		0x21
> +#define  C10_CMN3_TXVBOOST_MASK		REG_GENMASK8(7, 5)
> +#define  C10_CMN3_TXVBOOST(val)		REG_FIELD_PREP8(C10_CMN3_TXVBOOST_MASK, val)
> +#define PHY_C10_VDR_TX(idx)		(0xC30 + (idx))
> +#define  C10_TX0_VAL			0x10
> +#define PHY_C10_VDR_CONTROL(idx)	(0xC70 + (idx) - 1)
> +#define  C10_VDR_CTRL_MSGBUS_ACCESS	REG_BIT8(2)
> +#define  C10_VDR_CTRL_MASTER_LANE	REG_BIT8(1)
> +#define  C10_VDR_CTRL_UPDATE_CFG	REG_BIT8(0)
> +#define PHY_C10_VDR_CUSTOM_WIDTH	0xD02

Register macros don't belong here. Add a file just for the registers,
intel_cx0_phy_regs.h.

> +
> +static inline bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy)
> +{
> +	if (!IS_METEORLAKE(dev_priv))
> +		return false;
> +	else
> +		return (phy < PHY_C);
> +}

Please don't add new static inlines that require pulling i915_drv.h
into a new header.

If you have a simple if-else, please don't do if (!something), just
reverse the branches.
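
i.e. something like this, as a plain function in the .c file:

bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
{
	if (IS_METEORLAKE(i915))
		return phy < PHY_C;

	return false;
}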

> +
> +void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
> +				     struct intel_c10mpllb_state *pll_state);
> +int intel_cx0mpllb_calc_state(struct intel_crtc_state *crtc_state,
> +			      struct intel_encoder *encoder);
> +void intel_c10mpllb_dump_hw_state(struct drm_i915_private *dev_priv,
> +				  const struct intel_c10mpllb_state *hw_state);
> +int intel_c10mpllb_calc_port_clock(struct intel_encoder *encoder,
> +				   const struct intel_c10mpllb_state *pll_state);
> +void intel_c10mpllb_state_verify(struct intel_atomic_state *state,
> +				 struct intel_crtc_state *new_crtc_state);
> +
> +#endif /* __INTEL_CX0_PHY_H__ */
> diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
> index 971356237eca..aaa8846c3b18 100644
> --- a/drivers/gpu/drm/i915/display/intel_ddi.c
> +++ b/drivers/gpu/drm/i915/display/intel_ddi.c
> @@ -38,6 +38,7 @@
>  #include "intel_combo_phy_regs.h"
>  #include "intel_connector.h"
>  #include "intel_crtc.h"
> +#include "intel_cx0_phy.h"
>  #include "intel_ddi.h"
>  #include "intel_ddi_buf_trans.h"
>  #include "intel_de.h"
> @@ -3487,6 +3488,21 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
>  						     &crtc_state->dpll_hw_state);
>  }
>  
> +static void mtl_ddi_get_config(struct intel_encoder *encoder,
> +			       struct intel_crtc_state *crtc_state)
> +{
> +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> +	enum phy phy = intel_port_to_phy(i915, encoder->port);
> +
> +	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
> +
> +	intel_c10mpllb_readout_hw_state(encoder, &crtc_state->c10mpllb_state);
> +	intel_c10mpllb_dump_hw_state(i915, &crtc_state->c10mpllb_state);

Whoa, do we want this noise? Let's just dump the state if it does not
match the sw state, and that gets done elsewhere.

> +	crtc_state->port_clock = intel_c10mpllb_calc_port_clock(encoder, &crtc_state->c10mpllb_state);
> +
> +	intel_ddi_get_config(encoder, crtc_state);
> +}
> +
>  static void dg2_ddi_get_config(struct intel_encoder *encoder,
>  				struct intel_crtc_state *crtc_state)
>  {
> @@ -4367,7 +4383,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
>  	encoder->cloneable = 0;
>  	encoder->pipe_mask = ~0;
>  
> -	if (IS_DG2(dev_priv)) {
> +	if (DISPLAY_VER(dev_priv) >= 14) {
> +		encoder->get_config = mtl_ddi_get_config;
> +	} else if (IS_DG2(dev_priv)) {
>  		encoder->enable_clock = intel_mpllb_enable;
>  		encoder->disable_clock = intel_mpllb_disable;
>  		encoder->get_config = dg2_ddi_get_config;
> diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
> index eb8eaeb19881..5f9272f6e186 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.c
> +++ b/drivers/gpu/drm/i915/display/intel_display.c
> @@ -47,6 +47,7 @@
>  
>  #include "display/intel_audio.h"
>  #include "display/intel_crt.h"
> +#include "display/intel_cx0_phy.h"
>  #include "display/intel_ddi.h"
>  #include "display/intel_display_debugfs.h"
>  #include "display/intel_display_power.h"
> diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
> index 1e608b9e5055..451c90b6d08d 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_power.c
> +++ b/drivers/gpu/drm/i915/display/intel_display_power.c
> @@ -1626,7 +1626,8 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
>  		return;
>  
>  	/* 2. Initialize all combo phys */
> -	intel_combo_phy_init(dev_priv);
> +	if (DISPLAY_VER(dev_priv) < 14)
> +		intel_combo_phy_init(dev_priv);
>  
>  	/*
>  	 * 3. Enable Power Well 1 (PG1).
> diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
> index df7ee4969ef1..84e7f9d44ff9 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
> +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
> @@ -980,7 +980,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
>  	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
>  		bxt_verify_ddi_phy_power_wells(dev_priv);
>  
> -	if (DISPLAY_VER(dev_priv) >= 11)
> +	if (DISPLAY_VER(dev_priv) >= 11 && DISPLAY_VER(dev_priv) < 14)
>  		/*
>  		 * DMC retains HW context only for port A, the other combo
>  		 * PHY's HW context for port B is lost after DC transitions,
> diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
> index e2b853e9e51d..be6ff6cdfb0b 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_types.h
> +++ b/drivers/gpu/drm/i915/display/intel_display_types.h
> @@ -969,6 +969,11 @@ struct intel_mpllb_state {
>  	u32 mpllb_sscstep;
>  };
>  
> +struct intel_c10mpllb_state {
> +	u32 clock; /* in KHz */
> +	u8 pll[20];
> +};
> +
>  struct intel_crtc_state {
>  	/*
>  	 * uapi (drm) state. This is the software state shown to userspace.
> @@ -1108,6 +1113,7 @@ struct intel_crtc_state {
>  	union {
>  		struct intel_dpll_hw_state dpll_hw_state;
>  		struct intel_mpllb_state mpllb_state;
> +		struct intel_c10mpllb_state c10mpllb_state;
>  	};
>  
>  	/*
> diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
> index b15ba78d64d6..73f541050913 100644
> --- a/drivers/gpu/drm/i915/display/intel_dpll.c
> +++ b/drivers/gpu/drm/i915/display/intel_dpll.c
> @@ -7,6 +7,7 @@
>  #include <linux/string_helpers.h>
>  
>  #include "intel_crtc.h"
> +#include "intel_cx0_phy.h"
>  #include "intel_de.h"
>  #include "intel_display.h"
>  #include "intel_display_types.h"
> @@ -993,6 +994,17 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
>  	return 0;
>  }
>  
> +static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
> +				  struct intel_crtc *crtc)
> +{
> +	struct intel_crtc_state *crtc_state =
> +		intel_atomic_get_new_crtc_state(state, crtc);
> +	struct intel_encoder *encoder =
> +		intel_get_crtc_new_encoder(state, crtc_state);
> +
> +	return intel_cx0mpllb_calc_state(crtc_state, encoder);
> +}
> +
>  static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
>  {
>  	return dpll->m < factor * dpll->n;
> @@ -1421,6 +1433,10 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
>  	return 0;
>  }
>  
> +static const struct intel_dpll_funcs mtl_dpll_funcs = {
> +	.crtc_compute_clock = mtl_crtc_compute_clock,
> +};
> +
>  static const struct intel_dpll_funcs dg2_dpll_funcs = {
>  	.crtc_compute_clock = dg2_crtc_compute_clock,
>  };
> @@ -1515,7 +1531,9 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
>  void
>  intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
>  {
> -	if (IS_DG2(dev_priv))
> +	if (DISPLAY_VER(dev_priv) >= 14)
> +		dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
> +	else if (IS_DG2(dev_priv))
>  		dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
>  	else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
>  		dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
> diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
> index b63600d8ebeb..a3d015f44eed 100644
> --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
> +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
> @@ -4173,7 +4173,7 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
>  
>  	mutex_init(&dev_priv->display.dpll.lock);
>  
> -	if (IS_DG2(dev_priv))
> +	if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv))
>  		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
>  		dpll_mgr = NULL;
>  	else if (IS_ALDERLAKE_P(dev_priv))
> diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
> index 0fdcf2e6d57f..dfd9a0108b0f 100644
> --- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
> +++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
> @@ -11,6 +11,7 @@
>  #include "intel_atomic.h"
>  #include "intel_crtc.h"
>  #include "intel_crtc_state_dump.h"
> +#include "intel_cx0_phy.h"
>  #include "intel_display.h"
>  #include "intel_display_types.h"
>  #include "intel_fdi.h"
> @@ -235,6 +236,7 @@ void intel_modeset_verify_crtc(struct intel_crtc *crtc,
>  	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
>  	intel_shared_dpll_state_verify(crtc, old_crtc_state, new_crtc_state);
>  	intel_mpllb_state_verify(state, new_crtc_state);
> +	intel_c10mpllb_state_verify(state, new_crtc_state);
>  }
>  
>  void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,

-- 
Jani Nikula, Intel Open Source Graphics Center

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 3/5] drm/i915/mtl: Add support for C10 phy programming
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 3/5] drm/i915/mtl: Add support for C10 phy programming Mika Kahola
@ 2022-09-30  9:32   ` Jani Nikula
  2022-10-14 12:44     ` Kahola, Mika
  0 siblings, 1 reply; 17+ messages in thread
From: Jani Nikula @ 2022-09-30  9:32 UTC (permalink / raw)
  To: Mika Kahola, intel-gfx

On Thu, 29 Sep 2022, Mika Kahola <mika.kahola@intel.com> wrote:
> From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
>
> Add sequences for C10 phy enable/disable phy lane reset,
> powerdown change sequence and phy lane programming.
>
> Bspec: 64539, 67636, 65451, 65450, 64568
>
> Cc: Imre Deak <imre.deak@intel.com>
> Cc: Mika Kahola <mika.kahola@intel.com>
> Cc: Uma Shankar <uma.shankar@intel.com>
> Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
> Signed-off-by: Mika Kahola <mika.kahola@intel.com> (v9)
> ---
>  drivers/gpu/drm/i915/Makefile                |   1 +
>  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 352 ++++++++++++++++++-
>  drivers/gpu/drm/i915/display/intel_cx0_phy.h |  17 +
>  drivers/gpu/drm/i915/display/intel_ddi.c     |   2 +
>  drivers/gpu/drm/i915/display/intel_dp.c      |  15 +-
>  drivers/gpu/drm/i915/display/intel_dpll.c    |   2 +
>  drivers/gpu/drm/i915/i915_reg.h              | 141 ++++++++
>  7 files changed, 526 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index a26edcdadc21..994f87a12782 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -279,6 +279,7 @@ i915-y += \
>  	display/icl_dsi.o \
>  	display/intel_backlight.o \
>  	display/intel_crt.o \
> +	display/intel_cx0_phy.o \

This belongs where intel_cx0_phy.c is added.

>  	display/intel_ddi.o \
>  	display/intel_ddi_buf_trans.o \
>  	display/intel_display_trace.o \
> diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> index 2f401116d1d0..6ba11cd7cd75 100644
> --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> @@ -526,9 +526,9 @@ void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
>  			 tx0, cmn, phy_name(phy));
>  }
>  
> -__maybe_unused static void intel_c10_pll_program(struct drm_i915_private *i915,
> -						 const struct intel_crtc_state *crtc_state,
> -						 struct intel_encoder *encoder)
> +static void intel_c10_pll_program(struct drm_i915_private *i915,
> +				  const struct intel_crtc_state *crtc_state,
> +				  struct intel_encoder *encoder)
>  {
>  	const struct intel_c10mpllb_state *pll_state = &crtc_state->c10mpllb_state;
>  	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
> @@ -633,6 +633,352 @@ int intel_c10mpllb_calc_port_clock(struct intel_encoder *encoder,
>  				     10 << (tx_clk_div + 16));
>  }
>  
> +#define PHY_LANES_VAL_ARG(FIELD, lanes, arg)	({u32 __val; switch(lanes) {\
> +						  case INTEL_CX0_BOTH_LANES:	\
> +							__val = ((XELPDP_LANE0_##FIELD(arg)) |\
> +							        (XELPDP_LANE1_##FIELD(arg))); \
> +							break;				\
> +						  case INTEL_CX0_LANE0:         \
> +							__val = (XELPDP_LANE0_##FIELD(arg));\
> +							break;				\
> +						  case INTEL_CX0_LANE1:         \
> +							__val = (XELPDP_LANE1_##FIELD(arg));\
> +							break;  \
> +						 }; __val; })
> +
> +#define PHY_LANES_VAL(FIELD, lanes)	({u32 __val; switch(lanes) {\
> +						  case INTEL_CX0_BOTH_LANES:	\
> +							__val = (XELPDP_LANE0_##FIELD | \
> +							        XELPDP_LANE1_##FIELD); \
> +							break;				\
> +						  case INTEL_CX0_LANE0:         \
> +							__val = (XELPDP_LANE0_##FIELD);	     \
> +							break;				\
> +						  case INTEL_CX0_LANE1:         \
> +							__val = (XELPDP_LANE1_##FIELD);\
> +							break;  \
> +						 }; __val; })

Ugh that's ugly. I'll try to look the other way.

> +
> +static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
> +					 const struct intel_crtc_state *crtc_state,
> +					 bool lane_reversal)
> +{
> +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> +	struct intel_dp *intel_dp;
> +	bool ssc_enabled;
> +	u32 val = 0;
> +
> +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_REVERSAL,
> +		     lane_reversal ? XELPDP_PORT_REVERSAL : 0);
> +
> +	if (lane_reversal)
> +		val |= XELPDP_LANE1_PHY_CLOCK_SELECT;
> +
> +	val |= XELPDP_FORWARD_CLOCK_UNGATE;
> +	val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
> +
> +	if (intel_crtc_has_dp_encoder(crtc_state)) {
> +		intel_dp = enc_to_intel_dp(encoder);
> +		ssc_enabled = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
> +			      DP_MAX_DOWNSPREAD_0_5;

It is almost certainly the wrong thing to look at sink DPCD register
values in the low level PHY code. Smells like something that should be
added to the crtc state.
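
E.g. compute this once at compute config time and carry it in the crtc
state instead, something like (the field name is made up):

	/* in struct intel_crtc_state */
	bool sink_ssc;

	/* in intel_dp_compute_config() or thereabouts */
	crtc_state->sink_ssc = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
			       DP_MAX_DOWNSPREAD_0_5;

and then just look at crtc_state->sink_ssc here.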

> +
> +		/* TODO: DP2.0 10G and 20G rates enable MPLLA*/
> +		val |= ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
> +	}
> +	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
> +		     XELPDP_LANE1_PHY_CLOCK_SELECT |
> +		     XELPDP_FORWARD_CLOCK_UNGATE |
> +		     XELPDP_DDI_CLOCK_SELECT_MASK |
> +		     XELPDP_SSC_ENABLE_PLLB, val);
> +}
> +
> +static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
> +						enum port port,
> +						enum intel_cx0_lanes lane, u8 state)
> +{
> +	enum phy phy = intel_port_to_phy(i915, port);
> +
> +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
> +		     PHY_LANES_VAL(POWERDOWN_NEW_STATE_MASK, lane),
> +		     PHY_LANES_VAL_ARG(POWERDOWN_NEW_STATE, lane, state));
> +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
> +		     PHY_LANES_VAL(POWERDOWN_UPDATE, lane),
> +		     PHY_LANES_VAL(POWERDOWN_UPDATE, lane));
> +
> +	/* Update Timeout Value */
> +	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_BUF_CTL2(port),
> +				      PHY_LANES_VAL(POWERDOWN_UPDATE, lane), 0,
> +				      XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
> +		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
> +			 phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
> +}
> +
> +static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
> +{
> +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
> +		     XELPDP_POWER_STATE_READY_MASK,
> +		     XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
> +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(port),
> +		     XELPDP_POWER_STATE_ACTIVE_MASK |
> +		     XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
> +		     XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
> +		     XELPDP_PLL_LANE_STAGGERING_DELAY(0));
> +}
> +
> +/* FIXME: Some Type-C cases need not reset both the lanes. Handle those cases. */
> +static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, enum port port,
> +				     bool lane_reversal)
> +{
> +	enum phy phy = intel_port_to_phy(i915, port);
> +	enum intel_cx0_lanes lane = lane_reversal ? INTEL_CX0_LANE1 :
> +				    INTEL_CX0_LANE0;
> +
> +	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_BUF_CTL1(port),
> +				      XELPDP_PORT_BUF_SOC_PHY_READY,
> +				      XELPDP_PORT_BUF_SOC_PHY_READY,
> +				      XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
> +		drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
> +			 phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
> +
> +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
> +		     PHY_LANES_VAL(PIPE_RESET, INTEL_CX0_BOTH_LANES),
> +		     PHY_LANES_VAL(PIPE_RESET, INTEL_CX0_BOTH_LANES));
> +
> +	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_BUF_CTL2(port),
> +				      PHY_LANES_VAL(PHY_CURRENT_STATUS, INTEL_CX0_BOTH_LANES),
> +				      PHY_LANES_VAL(PHY_CURRENT_STATUS, INTEL_CX0_BOTH_LANES),
> +				      XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
> +		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
> +			 phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
> +
> +	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
> +		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST, lane),
> +		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST, lane));
> +
> +	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_CLOCK_CTL(port),
> +				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane),
> +				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane),
> +				      XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
> +		drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
> +			 phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
> +
> +	intel_cx0_powerdown_change_sequence(i915, port, INTEL_CX0_BOTH_LANES,
> +					    CX0_P2_STATE_RESET);
> +	intel_cx0_setup_powerdown(i915, port);
> +
> +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
> +		     PHY_LANES_VAL(PIPE_RESET, INTEL_CX0_BOTH_LANES), 0);
> +
> +	if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(port),
> +				    PHY_LANES_VAL(PHY_CURRENT_STATUS,
> +						  INTEL_CX0_BOTH_LANES),
> +				    XELPDP_PORT_RESET_END_TIMEOUT))
> +		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n",
> +			 phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
> +}
> +
> +static void intel_c10_program_phy_lane(struct drm_i915_private *i915,
> +				       enum port port, int lane_count,
> +				       bool lane_reversal)
> +{
> +	u8 l0t1, l0t2, l1t1, l1t2;
> +
> +	intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
> +		      C10_VDR_CTRL_MSGBUS_ACCESS, C10_VDR_CTRL_MSGBUS_ACCESS,
> +		      MB_WRITE_COMMITTED);
> +
> +	l0t1 = intel_cx0_read(i915, port, 0, PHY_CX0_TX_CONTROL(1, 2));
> +	l0t2 = intel_cx0_read(i915, port, 0, PHY_CX0_TX_CONTROL(2, 2));
> +	l1t1 = intel_cx0_read(i915, port, 1, PHY_CX0_TX_CONTROL(1, 2));
> +	l1t2 = intel_cx0_read(i915, port, 1, PHY_CX0_TX_CONTROL(2, 2));
> +
> +	if (lane_reversal) {
> +		switch (lane_count) {
> +		case 1:
> +			/* Disable MLs 1(lane0), 2(lane0), 3(lane1) */
> +			intel_cx0_write(i915, port, 1, PHY_CX0_TX_CONTROL(1, 2),
> +					l1t1 | CONTROL2_DISABLE_SINGLE_TX,
> +					MB_WRITE_COMMITTED);
> +			fallthrough;
> +		case 2:
> +			/* Disable MLs 1(lane0), 2(lane0) */
> +			intel_cx0_write(i915, port, 0, PHY_CX0_TX_CONTROL(2, 2),
> +					l0t2 | CONTROL2_DISABLE_SINGLE_TX,
> +					MB_WRITE_COMMITTED);
> +			fallthrough;
> +		case 3:
> +			/* Disable MLs 1(lane0) */
> +			intel_cx0_write(i915, port, 0, PHY_CX0_TX_CONTROL(1, 2),
> +					l0t1 | CONTROL2_DISABLE_SINGLE_TX,
> +					MB_WRITE_COMMITTED);
> +			break;
> +		}
> +	} else {
> +		switch (lane_count) {
> +		case 1:
> +			/* Disable MLs 2(lane0), 3(lane1), 4(lane1) */
> +			intel_cx0_write(i915, port, 0, PHY_CX0_TX_CONTROL(2, 2),
> +					l0t2 | CONTROL2_DISABLE_SINGLE_TX,
> +					MB_WRITE_COMMITTED);
> +			fallthrough;
> +		case 2:
> +			/* Disable MLs 3(lane1), 4(lane1) */
> +			intel_cx0_write(i915, port, 1, PHY_CX0_TX_CONTROL(1, 2),
> +					l1t1 | CONTROL2_DISABLE_SINGLE_TX,
> +					MB_WRITE_COMMITTED);
> +			fallthrough;
> +		case 3:
> +			/* Disable MLs 4(lane1) */
> +			intel_cx0_write(i915, port, 1, PHY_CX0_TX_CONTROL(2, 2),
> +					l1t2 | CONTROL2_DISABLE_SINGLE_TX,
> +					MB_WRITE_COMMITTED);
> +			break;
> +		}
> +	}
> +
> +	intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
> +		      C10_VDR_CTRL_UPDATE_CFG, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
> +}
> +
> +static void intel_c10pll_enable(struct intel_encoder *encoder,
> +				const struct intel_crtc_state *crtc_state)
> +{
> +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> +	enum phy phy = intel_port_to_phy(i915, encoder->port);
> +	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
> +	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
> +	enum intel_cx0_lanes maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
> +				    INTEL_CX0_LANE0;
> +
> +	/*
> +	 * 1. Program PORT_CLOCK_CTL REGISTER to configure
> +	 * clock muxes, gating and SSC
> +	 */
> +	intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
> +
> +	/* 2. Bring PHY out of reset. */
> +	intel_cx0_phy_lane_reset(i915, encoder->port, lane_reversal);
> +
> +	/*
> +	 * 3. Change Phy power state to Ready.
> +	 * TODO: For DP alt mode use only one lane.
> +	 */
> +	intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
> +					    CX0_P2_STATE_READY);
> +
> +	/* 4. Program PHY internal PLL internal registers. */
> +	intel_c10_pll_program(i915, crtc_state, encoder);
> +
> +	/*
> +	 * 5. Program the enabled and disabled owned PHY lane
> +	 * transmitters over message bus
> +	 */
> +	intel_c10_program_phy_lane(i915, encoder->port, crtc_state->lane_count, lane_reversal);
> +
> +	/*
> +	 * 6. Follow the Display Voltage Frequency Switching - Sequence
> +	 * Before Frequency Change. We handle this step in bxt_set_cdclk().
> +	 */
> +
> +	/*
> +	 * 7. Program DDI_CLK_VALFREQ to match intended DDI
> +	 * clock frequency.
> +	 */
> +	intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port),
> +		       crtc_state->port_clock);
> +	/*
> +	 * 8. Set PORT_CLOCK_CTL register PCLK PLL Request
> +	 * LN<Lane for maxPCLK> to "1" to enable PLL.
> +	 */
> +	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), 0,
> +		     PHY_LANES_VAL(PCLK_PLL_REQUEST, maxpclk_lane));
> +
> +	/* 9. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
> +	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_CLOCK_CTL(encoder->port),
> +				      PHY_LANES_VAL(PCLK_PLL_ACK, maxpclk_lane),
> +				      PHY_LANES_VAL(PCLK_PLL_ACK, maxpclk_lane),
> +				      XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
> +		drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n",
> +			 phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
> +
> +	/*
> +	 * 10. Follow the Display Voltage Frequency Switching Sequence After
> +	 * Frequency Change. We handle this step in bxt_set_cdclk().
> +	 */
> +}
> +
> +void intel_cx0pll_enable(struct intel_encoder *encoder,
> +			 const struct intel_crtc_state *crtc_state)
> +{
> +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> +	enum phy phy = intel_port_to_phy(i915, encoder->port);
> +
> +	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
> +	intel_c10pll_enable(encoder, crtc_state);
> +}
> +
> +static void intel_c10pll_disable(struct intel_encoder *encoder)
> +{
> +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> +	enum phy phy = intel_port_to_phy(i915, encoder->port);
> +	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
> +	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
> +	enum intel_cx0_lanes lane = lane_reversal ? INTEL_CX0_LANE1 :
> +				    INTEL_CX0_LANE0;
> +
> +	/* 1. Change owned PHY lane power to Disable state. */
> +	intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
> +					    CX0_P2PG_STATE_DISABLE);
> +
> +	/*
> +	 * 2. Follow the Display Voltage Frequency Switching Sequence Before
> +	 * Frequency Change. We handle this step in bxt_set_cdclk().
> +	 */
> +
> +	/*
> +	 * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK>
> +	 * to "0" to disable PLL.
> +	 */
> +	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
> +		     PHY_LANES_VAL(PCLK_PLL_REQUEST, INTEL_CX0_BOTH_LANES) |
> +		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST, INTEL_CX0_BOTH_LANES), 0);
> +
> +	/* 4. Program DDI_CLK_VALFREQ to 0. */
> +	intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0);
> +
> +	/*
> +	 * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
> +	 */
> +	if (__intel_wait_for_register(&i915->uncore, XELPDP_PORT_CLOCK_CTL(encoder->port),
> +				      PHY_LANES_VAL(PCLK_PLL_ACK, lane) |
> +				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane), 0,
> +				      XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
> +		drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n",
> +			 phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
> +
> +	/*
> +	 * 6. Follow the Display Voltage Frequency Switching Sequence After
> +	 * Frequency Change. We handle this step in bxt_set_cdclk().
> +	 */
> +
> +	/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
> +	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
> +		     XELPDP_DDI_CLOCK_SELECT_MASK |
> +		     XELPDP_FORWARD_CLOCK_UNGATE, 0);
> +}
> +
> +void intel_cx0pll_disable(struct intel_encoder *encoder)
> +{
> +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> +	enum phy phy = intel_port_to_phy(i915, encoder->port);
> +
> +	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
> +	intel_c10pll_disable(encoder);
> +}
> +
> +#undef PHY_LANES_VAL_ARG
> +#undef PHY_LANES_VAL
> +
>  void intel_c10mpllb_state_verify(struct intel_atomic_state *state,
>  				 struct intel_crtc_state *new_crtc_state)
>  {
> diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
> index cf1f300b6a7b..d12d2e2f02ee 100644
> --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
> +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
> @@ -106,6 +106,19 @@ enum intel_cx0_lanes {
>  #define  C10_VDR_CTRL_UPDATE_CFG	REG_BIT8(0)
>  #define PHY_C10_VDR_CUSTOM_WIDTH	0xD02
>  
> +#define CX0_P0_STATE_ACTIVE		0x0
> +#define CX0_P2_STATE_READY		0x2
> +#define CX0_P2PG_STATE_DISABLE		0x9
> +#define CX0_P4PG_STATE_DISABLE		0xC
> +#define CX0_P2_STATE_RESET		0x2
> +
> +/* PHY_C10_VDR_PLL0 */
> +#define PLL_C10_MPLL_SSC_EN		REG_BIT8(0)
> +
> +/* PIPE SPEC Defined Registers */
> +#define PHY_CX0_TX_CONTROL(tx, control)	(0x400 + ((tx) - 1) * 0x200 + (control))
> +#define CONTROL2_DISABLE_SINGLE_TX	REG_BIT(6)
> +

Again, register definitions don't belong here.

>  static inline bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy)
>  {
>  	if (!IS_METEORLAKE(dev_priv))
> @@ -114,6 +127,10 @@ static inline bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy p
>  		return (phy < PHY_C);
>  }
>  
> +void intel_cx0pll_enable(struct intel_encoder *encoder,
> +			 const struct intel_crtc_state *crtc_state);
> +void intel_cx0pll_disable(struct intel_encoder *encoder);
> +
>  void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
>  				     struct intel_c10mpllb_state *pll_state);
>  int intel_cx0mpllb_calc_state(struct intel_crtc_state *crtc_state,
> diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
> index aaa8846c3b18..639ec604babf 100644
> --- a/drivers/gpu/drm/i915/display/intel_ddi.c
> +++ b/drivers/gpu/drm/i915/display/intel_ddi.c
> @@ -4384,6 +4384,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
>  	encoder->pipe_mask = ~0;
>  
>  	if (DISPLAY_VER(dev_priv) >= 14) {
> +		encoder->enable_clock = intel_cx0pll_enable;
> +		encoder->disable_clock = intel_cx0pll_disable;
>  		encoder->get_config = mtl_ddi_get_config;
>  	} else if (IS_DG2(dev_priv)) {
>  		encoder->enable_clock = intel_mpllb_enable;
> diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
> index 70b06806ec0d..db32799b5f46 100644
> --- a/drivers/gpu/drm/i915/display/intel_dp.c
> +++ b/drivers/gpu/drm/i915/display/intel_dp.c
> @@ -420,6 +420,11 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp)
>  	return 810000;
>  }
>  
> +static int mtl_max_source_rate(struct intel_dp *intel_dp)
> +{
> +	return intel_dp_is_edp(intel_dp) ? 675000 : 810000;
> +}
> +
>  static int vbt_max_link_rate(struct intel_dp *intel_dp)
>  {
>  	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
> @@ -444,6 +449,10 @@ static void
>  intel_dp_set_source_rates(struct intel_dp *intel_dp)
>  {
>  	/* The values must be in increasing order */
> +	static const int mtl_rates[] = {
> +		162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
> +		810000,
> +	};
>  	static const int icl_rates[] = {
>  		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
>  		1000000, 1350000,
> @@ -469,7 +478,11 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
>  	drm_WARN_ON(&dev_priv->drm,
>  		    intel_dp->source_rates || intel_dp->num_source_rates);
>  
> -	if (DISPLAY_VER(dev_priv) >= 11) {
> +	if (DISPLAY_VER(dev_priv) >= 14) {
> +		source_rates = mtl_rates;
> +		size = ARRAY_SIZE(mtl_rates);
> +		max_rate = mtl_max_source_rate(intel_dp);
> +	} else if (DISPLAY_VER(dev_priv) >= 11) {
>  		source_rates = icl_rates;
>  		size = ARRAY_SIZE(icl_rates);
>  		if (IS_DG2(dev_priv))

All of the changes to intel_dp.c should be a separate patch.

> diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
> index 73f541050913..d6fcdf4eba0e 100644
> --- a/drivers/gpu/drm/i915/display/intel_dpll.c
> +++ b/drivers/gpu/drm/i915/display/intel_dpll.c
> @@ -1533,6 +1533,8 @@ intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
>  {
>  	if (DISPLAY_VER(dev_priv) >= 14)
>  		dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
> +	else if (DISPLAY_VER(dev_priv) >= 14)
> +		dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
>  	else if (IS_DG2(dev_priv))
>  		dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
>  	else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index 5003a5ffbc6a..5e6ff9f2aa10 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -2121,6 +2121,11 @@
>  #define   TRANS_PUSH_EN			REG_BIT(31)
>  #define   TRANS_PUSH_SEND		REG_BIT(30)
>  
> +/* DDI Buffer Control */
> +#define _DDI_CLK_VALFREQ_A		0x64030
> +#define _DDI_CLK_VALFREQ_B		0x64130
> +#define DDI_CLK_VALFREQ(port)		_MMIO_PORT(port, _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B)
> +
>  /*
>   * HSW+ eDP PSR registers
>   *
> @@ -8375,4 +8380,140 @@ enum skl_power_gate {
>  
>  #define MTL_MEDIA_GSI_BASE		0x380000
>  
> +#define PUNIT_MMIO_CR_POC_STRAPS	_MMIO(0x281078)
> +#define   NUM_TILES_MASK		REG_GENMASK(1, 0)
> +#define   CD_ALIVE			REG_BIT(2)
> +#define   SOCKET_ID_MASK		REG_GENMASK(7, 3)
> +
> +/* Define the BAR and offset for the accelerator fabric CSRs */
> +#define CD_BASE_OFFSET 0x291000
> +#define CD_BAR_SIZE (256 * 1024)
> +
> +/*
> + * In general, the i915 should not touch the IAF registers.  The registers
> + * will be passed as an IO resource via the MFD interface.  However, it
> + * is necessary to put the IRQ bits in a known state, before the MFD cell
> + * is registered.
> + *
> + * So define these registers for i915 usage.

These should probably be split to a separate _regs file, like we've been
doing for other registers. Especially because "In general, the i915
should not touch the IAF registers."
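
E.g. a new intel_cx0_phy_regs.h could hold the XELPDP_* message bus and
port definitions below; the file name is just a suggestion here, following
the existing *_regs.h split:

/* drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h (hypothetical) */
/* SPDX-License-Identifier: MIT */
#ifndef __INTEL_CX0_PHY_REGS_H__
#define __INTEL_CX0_PHY_REGS_H__

#include "i915_reg_defs.h"

#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A		0x64040
/* ... rest of the XELPDP_* definitions ... */

#endif /* __INTEL_CX0_PHY_REGS_H__ */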

> + */
> +#define CPORT_MBDB_CSRS (CD_BASE_OFFSET + 0x6000)
> +#define CPORT_MBDB_CSRS_END (CPORT_MBDB_CSRS + 0x1000)
> +#define CPORT_MBDB_INT_ENABLE_MASK _MMIO(CPORT_MBDB_CSRS + 0x8)
> +
> +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A		0x64040
> +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B		0x64140
> +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1		0x16F240
> +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2		0x16F440
> +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC3		0x16F640
> +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC4		0x16F840
> +#define _XELPDP_PORT_M2P_MSGBUS_CTL(port, lane)		(_PICK(port, \
> +							[PORT_A] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
> +							[PORT_B] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
> +							[PORT_TC1] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
> +							[PORT_TC2] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2, \
> +							[PORT_TC3] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC3, \
> +							[PORT_TC4] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC4) + ((lane) * 4))
> +
> +#define XELPDP_PORT_M2P_MSGBUS_CTL(port, lane)		_MMIO(_XELPDP_PORT_M2P_MSGBUS_CTL(port, lane))
> +#define  XELPDP_PORT_M2P_TRANSACTION_PENDING		REG_BIT(31)
> +#define  XELPDP_PORT_M2P_COMMAND_TYPE_MASK		REG_GENMASK(30, 27)
> +#define  XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
> +#define  XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2)
> +#define  XELPDP_PORT_M2P_COMMAND_READ			REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3)
> +#define  XELPDP_PORT_M2P_DATA_MASK			REG_GENMASK(23, 16)
> +#define  XELPDP_PORT_M2P_DATA(val)			REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val)
> +#define  XELPDP_PORT_M2P_TRANSACTION_RESET		REG_BIT(15)
> +#define  XELPDP_PORT_M2P_ADDRESS_MASK			REG_GENMASK(11, 0)
> +#define  XELPDP_PORT_M2P_ADDRESS(val)			REG_FIELD_PREP(XELPDP_PORT_M2P_ADDRESS_MASK, val)
> +
> +#define XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)	_MMIO(_XELPDP_PORT_M2P_MSGBUS_CTL(port, lane) + 8)
> +#define  XELPDP_PORT_P2M_RESPONSE_READY			REG_BIT(31)
> +#define  XELPDP_PORT_P2M_COMMAND_TYPE_MASK		REG_GENMASK(30, 27)
> +#define  XELPDP_PORT_P2M_COMMAND_READ_ACK		0x4
> +#define  XELPDP_PORT_P2M_COMMAND_WRITE_ACK		0x5
> +#define  XELPDP_PORT_P2M_DATA_MASK			REG_GENMASK(23, 16)
> +#define  XELPDP_PORT_P2M_DATA(val)			REG_FIELD_PREP(XELPDP_PORT_P2M_DATA_MASK, val)
> +#define  XELPDP_PORT_P2M_ERROR_SET			REG_BIT(15)
> +
> +#define  XELPDP_MSGBUS_TIMEOUT_SLOW			1
> +#define  XELPDP_MSGBUS_TIMEOUT_FAST_US			2
> +#define XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US		3200
> +#define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US		20
> +#define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US		100
> +#define XELPDP_PORT_RESET_START_TIMEOUT_US		5
> +#define XELPDP_PORT_RESET_END_TIMEOUT			15
> +#define XELPDP_REFCLK_ENABLE_TIMEOUT_US			1
> +
> +#define _XELPDP_PORT_BUF_CTL1_LN0_A			0x64004
> +#define _XELPDP_PORT_BUF_CTL1_LN0_B			0x64104
> +#define _XELPDP_PORT_BUF_CTL1_LN0_USBC1			0x16F200
> +#define _XELPDP_PORT_BUF_CTL1_LN0_USBC2			0x16F400
> +#define _XELPDP_PORT_BUF_CTL1_LN0_USBC3			0x16F600
> +#define _XELPDP_PORT_BUF_CTL1_LN0_USBC4			0x16F800
> +#define _XELPDP_PORT_BUF_CTL1(port)			(_PICK(port, \
> +							[PORT_A] = _XELPDP_PORT_BUF_CTL1_LN0_A, \
> +							[PORT_B] = _XELPDP_PORT_BUF_CTL1_LN0_B, \
> +							[PORT_TC1] = _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
> +							[PORT_TC2] = _XELPDP_PORT_BUF_CTL1_LN0_USBC2, \
> +							[PORT_TC3] = _XELPDP_PORT_BUF_CTL1_LN0_USBC3, \
> +							[PORT_TC4] = _XELPDP_PORT_BUF_CTL1_LN0_USBC4))
> +
> +#define XELPDP_PORT_BUF_CTL1(port)			_MMIO(_XELPDP_PORT_BUF_CTL1(port))
> +#define  XELPDP_PORT_BUF_SOC_PHY_READY			REG_BIT(24)
> +#define  XELPDP_PORT_REVERSAL				REG_BIT(16)
> +#define  XELPDP_PORT_WIDTH_MASK				REG_GENMASK(3, 1)
> +#define  XELPDP_PORT_WIDTH(val)				REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, val)
> +
> +#define XELPDP_PORT_BUF_CTL2(port)			_MMIO(_XELPDP_PORT_BUF_CTL1(port) + 4)
> +#define  XELPDP_LANE0_PIPE_RESET			REG_BIT(31)
> +#define  XELPDP_LANE1_PIPE_RESET			REG_BIT(30)
> +#define  XELPDP_LANE0_PHY_CURRENT_STATUS		REG_BIT(29)
> +#define  XELPDP_LANE1_PHY_CURRENT_STATUS		REG_BIT(28)
> +#define  XELPDP_LANE0_POWERDOWN_UPDATE			REG_BIT(25)
> +#define  XELPDP_LANE1_POWERDOWN_UPDATE			REG_BIT(24)
> +#define  XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK		REG_GENMASK(23, 20)
> +#define  XELPDP_LANE0_POWERDOWN_NEW_STATE(val)		REG_FIELD_PREP(XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK, val)
> +#define  XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK		REG_GENMASK(19, 16)
> +#define  XELPDP_LANE1_POWERDOWN_NEW_STATE(val)		REG_FIELD_PREP(XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK, val)
> +#define  XELPDP_POWER_STATE_READY_MASK			REG_GENMASK(7, 4)
> +#define  XELPDP_POWER_STATE_READY(val)			REG_FIELD_PREP(XELPDP_POWER_STATE_READY_MASK, val)
> +
> +#define XELPDP_PORT_BUF_CTL3(port)			_MMIO(_XELPDP_PORT_BUF_CTL1(port) + 8)
> +#define  XELPDP_PLL_LANE_STAGGERING_DELAY_MASK		REG_GENMASK(15, 8)
> +#define  XELPDP_PLL_LANE_STAGGERING_DELAY(val)		REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
> +#define  XELPDP_POWER_STATE_ACTIVE_MASK			REG_GENMASK(3, 0)
> +#define  XELPDP_POWER_STATE_ACTIVE(val)			REG_FIELD_PREP(XELPDP_POWER_STATE_ACTIVE_MASK, val)
> +
> +#define _XELPDP_PORT_CLOCK_CTL_A			0x640E0
> +#define _XELPDP_PORT_CLOCK_CTL_B			0x641E0
> +#define _XELPDP_PORT_CLOCK_CTL_USBC1			0x16F260
> +#define _XELPDP_PORT_CLOCK_CTL_USBC2			0x16F460
> +#define _XELPDP_PORT_CLOCK_CTL_USBC3			0x16F660
> +#define _XELPDP_PORT_CLOCK_CTL_USBC4			0x16F860
> +#define XELPDP_PORT_CLOCK_CTL(port)			_MMIO(_PICK(port, \
> +							[PORT_A] = _XELPDP_PORT_CLOCK_CTL_A, \
> +							[PORT_B] = _XELPDP_PORT_CLOCK_CTL_B, \
> +							[PORT_TC1] = _XELPDP_PORT_CLOCK_CTL_USBC1, \
> +							[PORT_TC2] = _XELPDP_PORT_CLOCK_CTL_USBC2, \
> +							[PORT_TC3] = _XELPDP_PORT_CLOCK_CTL_USBC3, \
> +							[PORT_TC4] = _XELPDP_PORT_CLOCK_CTL_USBC4))
> +
> +#define XELPDP_LANE0_PCLK_PLL_REQUEST			REG_BIT(31)
> +#define XELPDP_LANE0_PCLK_PLL_ACK			REG_BIT(30)
> +#define XELPDP_LANE0_PCLK_REFCLK_REQUEST		REG_BIT(29)
> +#define XELPDP_LANE0_PCLK_REFCLK_ACK			REG_BIT(28)
> +#define XELPDP_LANE1_PCLK_PLL_REQUEST			REG_BIT(27)
> +#define XELPDP_LANE1_PCLK_PLL_ACK			REG_BIT(26)
> +#define XELPDP_LANE1_PCLK_REFCLK_REQUEST		REG_BIT(25)
> +#define XELPDP_LANE1_PCLK_REFCLK_ACK			REG_BIT(24)
> +#define XELPDP_DDI_CLOCK_SELECT_MASK			REG_GENMASK(15, 12)
> +#define XELPDP_DDI_CLOCK_SELECT(val)			REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
> +#define XELPDP_DDI_CLOCK_SELECT_NONE			0x0
> +#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK			0x8
> +#define XELPDP_FORWARD_CLOCK_UNGATE			REG_BIT(10)
> +#define XELPDP_LANE1_PHY_CLOCK_SELECT			REG_BIT(8)
> +#define XELPDP_SSC_ENABLE_PLLA				REG_BIT(1)
> +#define XELPDP_SSC_ENABLE_PLLB				REG_BIT(0)
> +
>  #endif /* _I915_REG_H_ */

-- 
Jani Nikula, Intel Open Source Graphics Center

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [Intel-gfx] ✓ Fi.CI.IGT: success for drm/i915/mtl: Add C10 phy support
  2022-09-29 13:17 [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 phy support Mika Kahola
                   ` (6 preceding siblings ...)
  2022-09-29 20:08 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
@ 2022-09-30 21:08 ` Patchwork
  7 siblings, 0 replies; 17+ messages in thread
From: Patchwork @ 2022-09-30 21:08 UTC (permalink / raw)
  To: Mika Kahola; +Cc: intel-gfx

[-- Attachment #1: Type: text/plain, Size: 32709 bytes --]

== Series Details ==

Series: drm/i915/mtl: Add C10 phy support
URL   : https://patchwork.freedesktop.org/series/109248/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_12199_full -> Patchwork_109248v1_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Participating hosts (9 -> 9)
------------------------------

  No changes in participating hosts

Known issues
------------

  Here are the changes found in Patchwork_109248v1_full that come from known issues:

### CI changes ###

#### Possible fixes ####

  * boot:
    - shard-glk:          ([PASS][1], [PASS][2], [PASS][3], [PASS][4], [PASS][5], [PASS][6], [PASS][7], [PASS][8], [PASS][9], [PASS][10], [PASS][11], [PASS][12], [PASS][13], [PASS][14], [PASS][15], [PASS][16], [FAIL][17], [PASS][18], [PASS][19], [FAIL][20], [PASS][21], [PASS][22], [PASS][23], [PASS][24], [PASS][25]) ([i915#4392]) -> ([PASS][26], [PASS][27], [PASS][28], [PASS][29], [PASS][30], [PASS][31], [PASS][32], [PASS][33], [PASS][34], [PASS][35], [PASS][36], [PASS][37], [PASS][38], [PASS][39], [PASS][40], [PASS][41], [PASS][42], [PASS][43], [PASS][44], [PASS][45], [PASS][46], [PASS][47], [PASS][48], [PASS][49])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk9/boot.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk9/boot.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk8/boot.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk8/boot.html
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk8/boot.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk7/boot.html
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk7/boot.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk7/boot.html
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk6/boot.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk6/boot.html
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk6/boot.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk5/boot.html
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk5/boot.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk5/boot.html
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk3/boot.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk3/boot.html
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk3/boot.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk3/boot.html
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk2/boot.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk2/boot.html
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk2/boot.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk2/boot.html
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk1/boot.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk1/boot.html
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk9/boot.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk9/boot.html
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk9/boot.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk9/boot.html
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk8/boot.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk8/boot.html
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk8/boot.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk7/boot.html
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk7/boot.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk6/boot.html
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk6/boot.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk6/boot.html
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk5/boot.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk5/boot.html
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk5/boot.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk5/boot.html
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk3/boot.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk3/boot.html
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk3/boot.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk2/boot.html
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk2/boot.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk2/boot.html
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk1/boot.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk1/boot.html
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk1/boot.html

  

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_sseu@invalid-sseu:
    - shard-tglb:         NOTRUN -> [SKIP][50] ([i915#280])
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@gem_ctx_sseu@invalid-sseu.html

  * igt@gem_exec_balancer@parallel-bb-first:
    - shard-iclb:         [PASS][51] -> [SKIP][52] ([i915#4525]) +2 similar issues
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb4/igt@gem_exec_balancer@parallel-bb-first.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb5/igt@gem_exec_balancer@parallel-bb-first.html

  * igt@gem_exec_fair@basic-pace@rcs0:
    - shard-iclb:         [PASS][53] -> [FAIL][54] ([i915#2842])
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb7/igt@gem_exec_fair@basic-pace@rcs0.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb1/igt@gem_exec_fair@basic-pace@rcs0.html

  * igt@gem_exec_fair@basic-pace@vcs0:
    - shard-glk:          [PASS][55] -> [FAIL][56] ([i915#2842]) +2 similar issues
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk6/igt@gem_exec_fair@basic-pace@vcs0.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk9/igt@gem_exec_fair@basic-pace@vcs0.html

  * igt@gem_exec_fair@basic-pace@vcs1:
    - shard-iclb:         NOTRUN -> [FAIL][57] ([i915#2842])
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb1/igt@gem_exec_fair@basic-pace@vcs1.html

  * igt@gem_exec_params@rsvd2-dirt:
    - shard-tglb:         NOTRUN -> [SKIP][58] ([fdo#109283])
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@gem_exec_params@rsvd2-dirt.html

  * igt@gem_lmem_swapping@random:
    - shard-tglb:         NOTRUN -> [SKIP][59] ([i915#4613])
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@gem_lmem_swapping@random.html

  * igt@gem_lmem_swapping@random-engines:
    - shard-glk:          NOTRUN -> [SKIP][60] ([fdo#109271] / [i915#4613]) +1 similar issue
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk8/igt@gem_lmem_swapping@random-engines.html

  * igt@gem_pxp@fail-invalid-protected-context:
    - shard-tglb:         NOTRUN -> [SKIP][61] ([i915#4270])
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@gem_pxp@fail-invalid-protected-context.html

  * igt@gem_userptr_blits@coherency-unsync:
    - shard-tglb:         NOTRUN -> [SKIP][62] ([i915#3297])
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@gem_userptr_blits@coherency-unsync.html

  * igt@gem_workarounds@suspend-resume:
    - shard-apl:          [PASS][63] -> [DMESG-WARN][64] ([i915#180]) +1 similar issue
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-apl2/igt@gem_workarounds@suspend-resume.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-apl2/igt@gem_workarounds@suspend-resume.html

  * igt@gen9_exec_parse@batch-invalid-length:
    - shard-tglb:         NOTRUN -> [SKIP][65] ([i915#2527] / [i915#2856])
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@gen9_exec_parse@batch-invalid-length.html

  * igt@i915_pm_rc6_residency@rc6-idle@rcs0:
    - shard-tglb:         NOTRUN -> [WARN][66] ([i915#2681]) +3 similar issues
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@i915_pm_rc6_residency@rc6-idle@rcs0.html

  * igt@kms_addfb_basic@legacy-format:
    - shard-tglb:         [PASS][67] -> [INCOMPLETE][68] ([i915#6987])
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-tglb5/igt@kms_addfb_basic@legacy-format.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb5/igt@kms_addfb_basic@legacy-format.html

  * igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180:
    - shard-tglb:         NOTRUN -> [SKIP][69] ([i915#5286])
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-180.html

  * igt@kms_big_fb@y-tiled-8bpp-rotate-270:
    - shard-tglb:         NOTRUN -> [SKIP][70] ([fdo#111614]) +1 similar issue
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_big_fb@y-tiled-8bpp-rotate-270.html

  * igt@kms_big_fb@yf-tiled-addfb:
    - shard-tglb:         NOTRUN -> [SKIP][71] ([fdo#111615])
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_big_fb@yf-tiled-addfb.html

  * igt@kms_big_joiner@basic:
    - shard-tglb:         NOTRUN -> [SKIP][72] ([i915#2705])
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_big_joiner@basic.html

  * igt@kms_ccs@pipe-a-bad-rotation-90-4_tiled_dg2_mc_ccs:
    - shard-tglb:         NOTRUN -> [SKIP][73] ([i915#6095])
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_ccs@pipe-a-bad-rotation-90-4_tiled_dg2_mc_ccs.html

  * igt@kms_ccs@pipe-a-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs:
    - shard-glk:          NOTRUN -> [SKIP][74] ([fdo#109271] / [i915#3886]) +3 similar issues
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk8/igt@kms_ccs@pipe-a-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-b-crc-primary-basic-yf_tiled_ccs:
    - shard-tglb:         NOTRUN -> [SKIP][75] ([fdo#111615] / [i915#3689]) +1 similar issue
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_ccs@pipe-b-crc-primary-basic-yf_tiled_ccs.html

  * igt@kms_ccs@pipe-b-missing-ccs-buffer-y_tiled_ccs:
    - shard-tglb:         NOTRUN -> [SKIP][76] ([i915#3689]) +1 similar issue
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_ccs@pipe-b-missing-ccs-buffer-y_tiled_ccs.html

  * igt@kms_ccs@pipe-c-bad-pixel-format-4_tiled_dg2_rc_ccs_cc:
    - shard-tglb:         NOTRUN -> [SKIP][77] ([i915#3689] / [i915#6095])
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_ccs@pipe-c-bad-pixel-format-4_tiled_dg2_rc_ccs_cc.html

  * igt@kms_ccs@pipe-c-crc-primary-basic-y_tiled_gen12_mc_ccs:
    - shard-apl:          NOTRUN -> [SKIP][78] ([fdo#109271] / [i915#3886]) +2 similar issues
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-apl6/igt@kms_ccs@pipe-c-crc-primary-basic-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-c-missing-ccs-buffer-y_tiled_gen12_mc_ccs:
    - shard-tglb:         NOTRUN -> [SKIP][79] ([i915#3689] / [i915#3886])
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_ccs@pipe-c-missing-ccs-buffer-y_tiled_gen12_mc_ccs.html

  * igt@kms_chamelium@dp-hpd-fast:
    - shard-tglb:         NOTRUN -> [SKIP][80] ([fdo#109284] / [fdo#111827]) +2 similar issues
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_chamelium@dp-hpd-fast.html

  * igt@kms_chamelium@dp-hpd-storm-disable:
    - shard-glk:          NOTRUN -> [SKIP][81] ([fdo#109271] / [fdo#111827]) +3 similar issues
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk8/igt@kms_chamelium@dp-hpd-storm-disable.html

  * igt@kms_color_chamelium@ctm-0-75:
    - shard-apl:          NOTRUN -> [SKIP][82] ([fdo#109271] / [fdo#111827])
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-apl1/igt@kms_color_chamelium@ctm-0-75.html

  * igt@kms_content_protection@dp-mst-lic-type-1:
    - shard-tglb:         NOTRUN -> [SKIP][83] ([i915#3116] / [i915#3299])
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_content_protection@dp-mst-lic-type-1.html

  * igt@kms_cursor_crc@cursor-suspend@pipe-c-edp-1:
    - shard-iclb:         [PASS][84] -> [DMESG-WARN][85] ([i915#2867])
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb8/igt@kms_cursor_crc@cursor-suspend@pipe-c-edp-1.html
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@kms_cursor_crc@cursor-suspend@pipe-c-edp-1.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible@a-hdmi-a1:
    - shard-glk:          [PASS][86] -> [FAIL][87] ([i915#79])
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk6/igt@kms_flip@flip-vs-expired-vblank-interruptible@a-hdmi-a1.html
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk7/igt@kms_flip@flip-vs-expired-vblank-interruptible@a-hdmi-a1.html

  * igt@kms_flip_scaled_crc@flip-32bpp-yftile-to-32bpp-yftileccs-downscaling@pipe-a-default-mode:
    - shard-iclb:         NOTRUN -> [SKIP][88] ([i915#6375])
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@kms_flip_scaled_crc@flip-32bpp-yftile-to-32bpp-yftileccs-downscaling@pipe-a-default-mode.html

  * igt@kms_flip_scaled_crc@flip-32bpp-yftileccs-to-64bpp-yftile-upscaling@pipe-a-valid-mode:
    - shard-iclb:         NOTRUN -> [SKIP][89] ([i915#2587] / [i915#2672])
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb4/igt@kms_flip_scaled_crc@flip-32bpp-yftileccs-to-64bpp-yftile-upscaling@pipe-a-valid-mode.html

  * igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tile-downscaling@pipe-a-default-mode:
    - shard-iclb:         NOTRUN -> [SKIP][90] ([i915#2672])
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tile-downscaling@pipe-a-default-mode.html

  * igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling@pipe-a-valid-mode:
    - shard-tglb:         NOTRUN -> [SKIP][91] ([i915#2587] / [i915#2672])
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling@pipe-a-valid-mode.html

  * igt@kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling@pipe-a-default-mode:
    - shard-iclb:         NOTRUN -> [SKIP][92] ([i915#3555])
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling@pipe-a-default-mode.html

  * igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-pgflip-blt:
    - shard-tglb:         NOTRUN -> [SKIP][93] ([fdo#109280] / [fdo#111825]) +9 similar issues
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-shrfb-pgflip-blt.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-onoff:
    - shard-iclb:         [PASS][94] -> [FAIL][95] ([i915#1888] / [i915#2546])
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb6/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-onoff.html
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-spr-indfb-onoff.html

  * igt@kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-mmap-wc:
    - shard-glk:          NOTRUN -> [SKIP][96] ([fdo#109271]) +73 similar issues
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk8/igt@kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-spr-indfb-draw-mmap-wc.html

  * igt@kms_frontbuffer_tracking@fbcpsr-rgb565-draw-mmap-wc:
    - shard-tglb:         NOTRUN -> [SKIP][97] ([i915#6497]) +2 similar issues
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_frontbuffer_tracking@fbcpsr-rgb565-draw-mmap-wc.html

  * igt@kms_frontbuffer_tracking@psr-rgb565-draw-pwrite:
    - shard-apl:          NOTRUN -> [SKIP][98] ([fdo#109271]) +42 similar issues
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-apl1/igt@kms_frontbuffer_tracking@psr-rgb565-draw-pwrite.html

  * igt@kms_invalid_mode@clock-too-high@edp-1-pipe-d:
    - shard-tglb:         NOTRUN -> [SKIP][99] ([i915#6403]) +3 similar issues
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_invalid_mode@clock-too-high@edp-1-pipe-d.html

  * igt@kms_plane_lowres@tiling-yf:
    - shard-tglb:         NOTRUN -> [SKIP][100] ([fdo#112054] / [i915#5288])
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_plane_lowres@tiling-yf.html

  * igt@kms_psr2_sf@cursor-plane-update-sf:
    - shard-glk:          NOTRUN -> [SKIP][101] ([fdo#109271] / [i915#658]) +1 similar issue
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk8/igt@kms_psr2_sf@cursor-plane-update-sf.html

  * igt@kms_psr2_sf@overlay-plane-move-continuous-sf:
    - shard-tglb:         NOTRUN -> [SKIP][102] ([i915#2920])
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_psr2_sf@overlay-plane-move-continuous-sf.html

  * igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb:
    - shard-apl:          NOTRUN -> [SKIP][103] ([fdo#109271] / [i915#658]) +1 similar issue
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-apl6/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb.html

  * igt@kms_psr@psr2_cursor_plane_onoff:
    - shard-tglb:         NOTRUN -> [FAIL][104] ([i915#132] / [i915#3467])
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_psr@psr2_cursor_plane_onoff.html

  * igt@kms_psr@psr2_sprite_plane_move:
    - shard-iclb:         [PASS][105] -> [SKIP][106] ([fdo#109441]) +2 similar issues
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb2/igt@kms_psr@psr2_sprite_plane_move.html
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb4/igt@kms_psr@psr2_sprite_plane_move.html

  * igt@kms_setmode@invalid-clone-single-crtc:
    - shard-tglb:         NOTRUN -> [SKIP][107] ([i915#3555])
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_setmode@invalid-clone-single-crtc.html

  * igt@kms_writeback@writeback-invalid-parameters:
    - shard-tglb:         NOTRUN -> [SKIP][108] ([i915#2437])
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@kms_writeback@writeback-invalid-parameters.html

  * igt@perf_pmu@event-wait@rcs0:
    - shard-tglb:         NOTRUN -> [SKIP][109] ([fdo#112283])
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@perf_pmu@event-wait@rcs0.html

  * igt@sysfs_clients@create:
    - shard-apl:          NOTRUN -> [SKIP][110] ([fdo#109271] / [i915#2994])
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-apl6/igt@sysfs_clients@create.html

  * igt@sysfs_clients@sema-50:
    - shard-tglb:         NOTRUN -> [SKIP][111] ([i915#2994])
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb2/igt@sysfs_clients@sema-50.html

  
#### Possible fixes ####

  * igt@gem_exec_balancer@parallel-keep-submit-fence:
    - shard-iclb:         [SKIP][112] ([i915#4525]) -> [PASS][113]
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb5/igt@gem_exec_balancer@parallel-keep-submit-fence.html
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb1/igt@gem_exec_balancer@parallel-keep-submit-fence.html

  * igt@gem_exec_fair@basic-flow@rcs0:
    - shard-tglb:         [FAIL][114] ([i915#2842]) -> [PASS][115]
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-tglb8/igt@gem_exec_fair@basic-flow@rcs0.html
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-tglb7/igt@gem_exec_fair@basic-flow@rcs0.html

  * igt@gem_exec_fair@basic-none@vcs0:
    - shard-glk:          [FAIL][116] ([i915#2842]) -> [PASS][117]
   [116]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk3/igt@gem_exec_fair@basic-none@vcs0.html
   [117]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk6/igt@gem_exec_fair@basic-none@vcs0.html

  * igt@gem_exec_reloc@basic-cpu-gtt-active:
    - shard-apl:          [DMESG-WARN][118] ([i915#62]) -> [PASS][119] +16 similar issues
   [118]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-apl2/igt@gem_exec_reloc@basic-cpu-gtt-active.html
   [119]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-apl2/igt@gem_exec_reloc@basic-cpu-gtt-active.html

  * igt@gen9_exec_parse@allowed-single:
    - shard-apl:          [DMESG-WARN][120] ([i915#5566] / [i915#716]) -> [PASS][121]
   [120]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-apl7/igt@gen9_exec_parse@allowed-single.html
   [121]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-apl6/igt@gen9_exec_parse@allowed-single.html

  * igt@i915_pm_dc@dc6-psr:
    - shard-iclb:         [FAIL][122] ([i915#3989] / [i915#454]) -> [PASS][123]
   [122]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb7/igt@i915_pm_dc@dc6-psr.html
   [123]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb8/igt@i915_pm_dc@dc6-psr.html

  * igt@kms_flip@2x-flip-vs-wf_vblank-interruptible@bc-hdmi-a1-hdmi-a2:
    - shard-glk:          [FAIL][124] ([i915#2122]) -> [PASS][125]
   [124]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk6/igt@kms_flip@2x-flip-vs-wf_vblank-interruptible@bc-hdmi-a1-hdmi-a2.html
   [125]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk7/igt@kms_flip@2x-flip-vs-wf_vblank-interruptible@bc-hdmi-a1-hdmi-a2.html

  * igt@kms_flip@basic-flip-vs-wf_vblank@b-dp1:
    - shard-apl:          [FAIL][126] -> [PASS][127]
   [126]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-apl2/igt@kms_flip@basic-flip-vs-wf_vblank@b-dp1.html
   [127]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-apl2/igt@kms_flip@basic-flip-vs-wf_vblank@b-dp1.html

  * igt@kms_flip@basic-flip-vs-wf_vblank@c-dp1:
    - shard-apl:          [DMESG-FAIL][128] ([i915#62]) -> [PASS][129]
   [128]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-apl2/igt@kms_flip@basic-flip-vs-wf_vblank@c-dp1.html
   [129]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-apl2/igt@kms_flip@basic-flip-vs-wf_vblank@c-dp1.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible@b-hdmi-a1:
    - shard-glk:          [FAIL][130] ([i915#79]) -> [PASS][131]
   [130]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-glk6/igt@kms_flip@flip-vs-expired-vblank-interruptible@b-hdmi-a1.html
   [131]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-glk7/igt@kms_flip@flip-vs-expired-vblank-interruptible@b-hdmi-a1.html

  * igt@kms_psr2_su@page_flip-xrgb8888:
    - shard-iclb:         [SKIP][132] ([fdo#109642] / [fdo#111068] / [i915#658]) -> [PASS][133]
   [132]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb8/igt@kms_psr2_su@page_flip-xrgb8888.html
   [133]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@kms_psr2_su@page_flip-xrgb8888.html

  * igt@kms_psr@psr2_sprite_mmap_gtt:
    - shard-iclb:         [SKIP][134] ([fdo#109441]) -> [PASS][135]
   [134]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb6/igt@kms_psr@psr2_sprite_mmap_gtt.html
   [135]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@kms_psr@psr2_sprite_mmap_gtt.html

  * igt@perf_pmu@rc6-suspend:
    - shard-apl:          [DMESG-WARN][136] ([i915#180]) -> [PASS][137]
   [136]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-apl8/igt@perf_pmu@rc6-suspend.html
   [137]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-apl1/igt@perf_pmu@rc6-suspend.html

  
#### Warnings ####

  * igt@gem_exec_balancer@parallel-ordering:
    - shard-iclb:         [SKIP][138] ([i915#4525]) -> [FAIL][139] ([i915#6117])
   [138]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb6/igt@gem_exec_balancer@parallel-ordering.html
   [139]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@gem_exec_balancer@parallel-ordering.html

  * igt@i915_pm_dc@dc3co-vpb-simulation:
    - shard-iclb:         [SKIP][140] ([i915#658]) -> [SKIP][141] ([i915#588])
   [140]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb6/igt@i915_pm_dc@dc3co-vpb-simulation.html
   [141]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@i915_pm_dc@dc3co-vpb-simulation.html

  * igt@i915_pm_rpm@modeset-non-lpsp-stress:
    - shard-iclb:         [SKIP][142] ([fdo#110892]) -> [INCOMPLETE][143] ([i915#6604])
   [142]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb4/igt@i915_pm_rpm@modeset-non-lpsp-stress.html
   [143]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb7/igt@i915_pm_rpm@modeset-non-lpsp-stress.html

  * igt@kms_psr2_sf@overlay-plane-move-continuous-exceed-sf:
    - shard-iclb:         [SKIP][144] ([i915#658]) -> [SKIP][145] ([i915#2920])
   [144]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb6/igt@kms_psr2_sf@overlay-plane-move-continuous-exceed-sf.html
   [145]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@kms_psr2_sf@overlay-plane-move-continuous-exceed-sf.html

  * igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area:
    - shard-iclb:         [SKIP][146] ([i915#2920]) -> [SKIP][147] ([fdo#111068] / [i915#658])
   [146]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb2/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area.html
   [147]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb5/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area.html

  * igt@kms_psr2_sf@plane-move-sf-dmg-area:
    - shard-iclb:         [SKIP][148] ([fdo#111068] / [i915#658]) -> [SKIP][149] ([i915#2920])
   [148]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb6/igt@kms_psr2_sf@plane-move-sf-dmg-area.html
   [149]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@kms_psr2_sf@plane-move-sf-dmg-area.html

  * igt@kms_psr2_su@page_flip-p010:
    - shard-iclb:         [SKIP][150] ([fdo#109642] / [fdo#111068] / [i915#658]) -> [FAIL][151] ([i915#5939])
   [150]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12199/shard-iclb6/igt@kms_psr2_su@page_flip-p010.html
   [151]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/shard-iclb2/igt@kms_psr2_su@page_flip-p010.html

  
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109280]: https://bugs.freedesktop.org/show_bug.cgi?id=109280
  [fdo#109283]: https://bugs.freedesktop.org/show_bug.cgi?id=109283
  [fdo#109284]: https://bugs.freedesktop.org/show_bug.cgi?id=109284
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109642]: https://bugs.freedesktop.org/show_bug.cgi?id=109642
  [fdo#110892]: https://bugs.freedesktop.org/show_bug.cgi?id=110892
  [fdo#111068]: https://bugs.freedesktop.org/show_bug.cgi?id=111068
  [fdo#111614]: https://bugs.freedesktop.org/show_bug.cgi?id=111614
  [fdo#111615]: https://bugs.freedesktop.org/show_bug.cgi?id=111615
  [fdo#111825]: https://bugs.freedesktop.org/show_bug.cgi?id=111825
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [fdo#112054]: https://bugs.freedesktop.org/show_bug.cgi?id=112054
  [fdo#112283]: https://bugs.freedesktop.org/show_bug.cgi?id=112283
  [i915#132]: https://gitlab.freedesktop.org/drm/intel/issues/132
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#1888]: https://gitlab.freedesktop.org/drm/intel/issues/1888
  [i915#2122]: https://gitlab.freedesktop.org/drm/intel/issues/2122
  [i915#2437]: https://gitlab.freedesktop.org/drm/intel/issues/2437
  [i915#2527]: https://gitlab.freedesktop.org/drm/intel/issues/2527
  [i915#2546]: https://gitlab.freedesktop.org/drm/intel/issues/2546
  [i915#2587]: https://gitlab.freedesktop.org/drm/intel/issues/2587
  [i915#2672]: https://gitlab.freedesktop.org/drm/intel/issues/2672
  [i915#2681]: https://gitlab.freedesktop.org/drm/intel/issues/2681
  [i915#2705]: https://gitlab.freedesktop.org/drm/intel/issues/2705
  [i915#280]: https://gitlab.freedesktop.org/drm/intel/issues/280
  [i915#2842]: https://gitlab.freedesktop.org/drm/intel/issues/2842
  [i915#2856]: https://gitlab.freedesktop.org/drm/intel/issues/2856
  [i915#2867]: https://gitlab.freedesktop.org/drm/intel/issues/2867
  [i915#2920]: https://gitlab.freedesktop.org/drm/intel/issues/2920
  [i915#2994]: https://gitlab.freedesktop.org/drm/intel/issues/2994
  [i915#3116]: https://gitlab.freedesktop.org/drm/intel/issues/3116
  [i915#3297]: https://gitlab.freedesktop.org/drm/intel/issues/3297
  [i915#3299]: https://gitlab.freedesktop.org/drm/intel/issues/3299
  [i915#3467]: https://gitlab.freedesktop.org/drm/intel/issues/3467
  [i915#3555]: https://gitlab.freedesktop.org/drm/intel/issues/3555
  [i915#3689]: https://gitlab.freedesktop.org/drm/intel/issues/3689
  [i915#3886]: https://gitlab.freedesktop.org/drm/intel/issues/3886
  [i915#3989]: https://gitlab.freedesktop.org/drm/intel/issues/3989
  [i915#4270]: https://gitlab.freedesktop.org/drm/intel/issues/4270
  [i915#4392]: https://gitlab.freedesktop.org/drm/intel/issues/4392
  [i915#4525]: https://gitlab.freedesktop.org/drm/intel/issues/4525
  [i915#454]: https://gitlab.freedesktop.org/drm/intel/issues/454
  [i915#4613]: https://gitlab.freedesktop.org/drm/intel/issues/4613
  [i915#5286]: https://gitlab.freedesktop.org/drm/intel/issues/5286
  [i915#5288]: https://gitlab.freedesktop.org/drm/intel/issues/5288
  [i915#5566]: https://gitlab.freedesktop.org/drm/intel/issues/5566
  [i915#588]: https://gitlab.freedesktop.org/drm/intel/issues/588
  [i915#5939]: https://gitlab.freedesktop.org/drm/intel/issues/5939
  [i915#6095]: https://gitlab.freedesktop.org/drm/intel/issues/6095
  [i915#6117]: https://gitlab.freedesktop.org/drm/intel/issues/6117
  [i915#62]: https://gitlab.freedesktop.org/drm/intel/issues/62
  [i915#6375]: https://gitlab.freedesktop.org/drm/intel/issues/6375
  [i915#6403]: https://gitlab.freedesktop.org/drm/intel/issues/6403
  [i915#6497]: https://gitlab.freedesktop.org/drm/intel/issues/6497
  [i915#658]: https://gitlab.freedesktop.org/drm/intel/issues/658
  [i915#6604]: https://gitlab.freedesktop.org/drm/intel/issues/6604
  [i915#6987]: https://gitlab.freedesktop.org/drm/intel/issues/6987
  [i915#716]: https://gitlab.freedesktop.org/drm/intel/issues/716
  [i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79


Build changes
-------------

  * Linux: CI_DRM_12199 -> Patchwork_109248v1

  CI-20190529: 20190529
  CI_DRM_12199: 6fa6bc62d3b91e5a70b8e4869436a0b03083abf5 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_6669: 3d2df081c14c251e0269e3510ddc4e9d26ffe925 @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
  Patchwork_109248v1: 6fa6bc62d3b91e5a70b8e4869436a0b03083abf5 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_109248v1/index.html

[-- Attachment #2: Type: text/html, Size: 38672 bytes --]

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus
  2022-09-30  9:04   ` Jani Nikula
@ 2022-10-06 10:04     ` Kahola, Mika
  0 siblings, 0 replies; 17+ messages in thread
From: Kahola, Mika @ 2022-10-06 10:04 UTC (permalink / raw)
  To: Jani Nikula, intel-gfx

> -----Original Message-----
> From: Jani Nikula <jani.nikula@linux.intel.com>
> Sent: Friday, September 30, 2022 12:05 PM
> To: Kahola, Mika <mika.kahola@intel.com>; intel-gfx@lists.freedesktop.org
> Subject: Re: [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY
> Message Bus
> 
> On Thu, 29 Sep 2022, Mika Kahola <mika.kahola@intel.com> wrote:
> > From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
> >
> > XELPDP has C10 and C20 phys from Synopsys to drive displays. Each phy
> > has a dedicated PIPE 5.2 Message bus for configuration. This message
> > bus is used to configure the phy internal registers.
> 
> This looks like a silly intermediate step, adding a bunch of static functions with
> __maybe_unused, just to be modified again in the next patch.

Yes, this was an intermediate step to get around the gcc warning about unused functions.

> 
> >
> > Bspec: 64599, 65100, 65101, 67610, 67636
> >
> > Cc: Mika Kahola <mika.kahola@intel.com>
> > Cc: Imre Deak <imre.deak@intel.com>
> > Cc: Uma Shankar <uma.shankar@intel.com>
> > Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
> > Signed-off-by: Mika Kahola <mika.kahola@intel.com> (v4)
> > ---
> >  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 179
> > +++++++++++++++++++
> >  1 file changed, 179 insertions(+)
> >  create mode 100644 drivers/gpu/drm/i915/display/intel_cx0_phy.c
> >
> > diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> > b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> > new file mode 100644
> > index 000000000000..7930b0255cfa
> > --- /dev/null
> > +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> > @@ -0,0 +1,179 @@
> > +// SPDX-License-Identifier: MIT
> > +/*
> > + * Copyright © 2021 Intel Corporation  */
> > +
> > +#include "intel_de.h"
> > +#include "intel_uncore.h"
> 
> Do you use anything from intel_uncore.h directly, or is it just intel_de.h?

I don't think this C10 patch series uses intel_uncore.h directly. I have to double-check that, though. If not, the intel_uncore.h include is not needed.

> 
> > +
> > +static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum
> > +port port, int lane) {
> > +	enum phy phy = intel_port_to_phy(i915, port);
> > +
> > +	/* Bring the phy to idle. */
> > +	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> > +		       XELPDP_PORT_M2P_TRANSACTION_RESET);
> > +
> > +	/* Wait for Idle Clear. */
> > +	if (intel_de_wait_for_clear(i915,
> XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> > +				    XELPDP_PORT_M2P_TRANSACTION_RESET,
> > +				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
> > +		drm_err_once(&i915->drm, "Failed to bring PHY %c to idle. \n",
> phy_name(phy));
> > +		return;
> > +	}
> > +
> > +	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
> ~0);
> > +	return;

Yeah, true.

> 
> Unnecessary return statement.
> 
> > +}
> > +
> > +__maybe_unused static u8 intel_cx0_read(struct drm_i915_private *i915,
> enum port port,
> > +			 int lane, u16 addr)
> > +{
> > +	enum phy phy = intel_port_to_phy(i915, port);
> > +	u32 val = 0;
> > +	int attempts = 0;
> > +
> > +retry:
> > +	if (attempts == 3) {
> > +		drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d
> retries. Status: 0x%x\n", phy_name(phy), addr, attempts, val ?: 0);
> > +		return 0;
> > +	}
> 
> The code looks like it would benefit from abstracting a non-retrying read
> function that returns errors, with this function doing the retry loop using a
> conventional for loop.

Yes, I could do some tidying up here.
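
Something along these lines, perhaps (an untested sketch only; the
__intel_cx0_read_once() name and the exact split are illustrative, reusing
the existing intel_cx0_bus_reset() helper and register macros):

static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
				 int lane, u16 addr)
{
	enum phy phy = intel_port_to_phy(i915, port);
	u32 val = 0;

	/* Wait for pending transactions. */
	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
		drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete.\n",
			    phy_name(phy));
		return -ETIMEDOUT;
	}

	/* Issue the read command. */
	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
		       XELPDP_PORT_M2P_COMMAND_READ |
		       XELPDP_PORT_M2P_ADDRESS(addr));

	/* Wait for the response and check it is a non-error read ack. */
	if (__intel_wait_for_register(&i915->uncore,
				      XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
				      XELPDP_PORT_P2M_RESPONSE_READY,
				      XELPDP_PORT_P2M_RESPONSE_READY,
				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
				      XELPDP_MSGBUS_TIMEOUT_SLOW, &val)) {
		drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for Read response ACK. Status: 0x%x\n",
			    phy_name(phy), val);
		return -ETIMEDOUT;
	}

	if ((val & XELPDP_PORT_P2M_ERROR_SET) ||
	    REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val) !=
	    XELPDP_PORT_P2M_COMMAND_READ_ACK) {
		drm_dbg_kms(&i915->drm, "PHY %c Invalid Read response. Status: 0x%x\n",
			    phy_name(phy), val);
		return -EINVAL;
	}

	/* Clear Response Ready flag. */
	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);

	return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
}

static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
			 int lane, u16 addr)
{
	int i, status;

	for (i = 0; i < 3; i++) {
		status = __intel_cx0_read_once(i915, port, lane, addr);
		if (status >= 0)
			return status;

		/* Reset the bus and retry. */
		intel_cx0_bus_reset(i915, port, lane);
	}

	drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries.\n",
		     phy_name(intel_port_to_phy(i915, port)), addr, i);

	return 0;
}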

> 
> There are four copy-pasted bits of error handling here, which is just error-prone.
> 
> > +
> > +	/* Wait for pending transactions.*/
> > +	if (intel_de_wait_for_clear(i915,
> XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> > +
> XELPDP_PORT_M2P_TRANSACTION_PENDING,
> > +				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
> > +		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous
> > +transaction to complete. Reset the bus and retry.\n", phy_name(phy));
> 
> drm_dbg_kms() throughout.
> 
> > +		attempts++;
> > +		intel_cx0_bus_reset(i915, port, lane);
> > +		goto retry;
> > +	}
> > +
> > +	/* Issue the read command. */
> > +	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> > +		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
> > +		       XELPDP_PORT_M2P_COMMAND_READ |
> > +		       XELPDP_PORT_M2P_ADDRESS(addr));
> > +
> > +	/* Wait for response ready. And read response.*/
> > +	if (__intel_wait_for_register(&i915->uncore,
> > +
> XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
> > +				      XELPDP_PORT_P2M_RESPONSE_READY,
> > +				      XELPDP_PORT_P2M_RESPONSE_READY,
> > +				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
> > +				      XELPDP_MSGBUS_TIMEOUT_SLOW, &val)) {
> > +		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Read
> response ACK. Status: 0x%x\n", phy_name(phy), val);
> > +		attempts++;
> > +		intel_cx0_bus_reset(i915, port, lane);
> > +		goto retry;
> > +	}
> > +
> > +	/* Check for error. */
> > +	if (val & XELPDP_PORT_P2M_ERROR_SET) {
> > +		drm_dbg(&i915->drm, "PHY %c Error occurred during read
> command. Status: 0x%x\n", phy_name(phy), val);
> > +		attempts++;
> > +		intel_cx0_bus_reset(i915, port, lane);
> > +		goto retry;
> > +	}
> > +
> > +	/* Check for Read Ack. */
> > +	if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val)
> !=
> > +	    XELPDP_PORT_P2M_COMMAND_READ_ACK) {
> > +		drm_dbg(&i915->drm, "PHY %c Not a Read response. MSGBUS
> Status: 0x%x.\n", phy_name(phy), val);
> > +		attempts++;
> > +		intel_cx0_bus_reset(i915, port, lane);
> > +		goto retry;
> > +	}
> > +
> > +	/* Clear Response Ready flag.*/
> > +	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
> ~0);
> 
> Blank line before return.
I will add the blank line there.

> 
> > +	return (u8)REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
> 
> Unnecessary cast.
Fixing it in the next set of patches.

> 
> > +}
> > +
> > +static int intel_cx0_wait_cwrite_ack(struct drm_i915_private *i915,
> > +				      enum port port, int lane)
> > +{
> > +	enum phy phy = intel_port_to_phy(i915, port);
> > +	u32 val;
> > +
> > +	/* Check for write ack. */
> > +	if (__intel_wait_for_register(&i915->uncore,
> > +
> XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
> > +				      XELPDP_PORT_P2M_RESPONSE_READY,
> > +				      XELPDP_PORT_P2M_RESPONSE_READY,
> > +				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
> > +				      XELPDP_MSGBUS_TIMEOUT_SLOW, &val)) {
> > +		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Committed
> message ACK. Status: 0x%x\n", phy_name(phy), val);
> > +		return -ETIMEDOUT;
> > +	}
> > +
> > +	if ((REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val)
> !=
> > +	     XELPDP_PORT_P2M_COMMAND_WRITE_ACK) || val &
> XELPDP_PORT_P2M_ERROR_SET) {
> > +		drm_dbg(&i915->drm, "PHY %c Unexpected ACK received.
> MSGBUS STATUS: 0x%x.\n", phy_name(phy), val);
> > +		return -EINVAL;
> > +	}
> 
> This is also copy-paste duplicating the stuff in the previous function. So why isn't
> this function used there?

This would benefit from being its own function. I will fix that in the next version of the patch series.
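
One option could be a shared helper that takes the expected ack type, used by
both the read and the committed write paths (illustrative sketch, untested;
the intel_cx0_wait_for_ack() name is made up here):

static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
				  int lane, int command, u32 *val)
{
	enum phy phy = intel_port_to_phy(i915, port);

	if (__intel_wait_for_register(&i915->uncore,
				      XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
				      XELPDP_PORT_P2M_RESPONSE_READY,
				      XELPDP_PORT_P2M_RESPONSE_READY,
				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
				      XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
		drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
			    phy_name(phy), *val);
		return -ETIMEDOUT;
	}

	if ((*val & XELPDP_PORT_P2M_ERROR_SET) ||
	    REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) {
		drm_dbg_kms(&i915->drm, "PHY %c Unexpected ACK received. Status: 0x%x\n",
			    phy_name(phy), *val);
		return -EINVAL;
	}

	return 0;
}

The read path would then pass XELPDP_PORT_P2M_COMMAND_READ_ACK and the
committed write path XELPDP_PORT_P2M_COMMAND_WRITE_ACK.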

> 
> > +
> > +	return 0;
> > +}
> > +
> > +__maybe_unused static void intel_cx0_write(struct drm_i915_private *i915,
> enum port port,
> > +			    int lane, u16 addr, u8 data, bool committed) {
> > +	enum phy phy = intel_port_to_phy(i915, port);
> > +	int attempts = 0;
> > +
> > +retry:
> > +	if (attempts == 3) {
> > +		drm_err_once(&i915->drm, "PHY %c Write %04x failed after %d
> retries.\n", phy_name(phy), addr, attempts);
> > +		return;
> > +	}
> 
> Same here with the retries as in the read. Have a lower level non-retrying write
> function, and handle the retries at a different abstraction level.

I'll try to rephrase these.

> 
> > +
> > +	/* Wait for pending transactions.*/
> > +	if (intel_de_wait_for_clear(i915,
> XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> > +
> XELPDP_PORT_M2P_TRANSACTION_PENDING,
> > +				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
> > +		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous
> transaction to complete. Reset the bus and retry.\n", phy_name(phy));
> > +		attempts++;
> > +		intel_cx0_bus_reset(i915, port, lane);
> > +		goto retry;
> > +	}
> > +
> > +	/* Issue the write command. */
> > +	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
> > +		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
> > +		       (committed ?
> XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
> > +		       XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED)
> |
> > +		       XELPDP_PORT_M2P_DATA(data) |
> > +		       XELPDP_PORT_M2P_ADDRESS(addr));
> > +
> > +	/* Check for error. */
> > +	if (committed) {
> > +		if (intel_cx0_wait_cwrite_ack(i915, port, lane) < 0) {
> > +			attempts++;
> > +			intel_cx0_bus_reset(i915, port, lane);
> > +			goto retry;
> > +		}
> > +	} else if ((intel_de_read(i915,
> XELPDP_PORT_P2M_MSGBUS_STATUS(phy, lane)) &
> > +			    XELPDP_PORT_P2M_ERROR_SET)) {
> > +		drm_dbg(&i915->drm, "PHY %c Error occurred during write
> command.\n", phy_name(phy));
> > +		attempts++;
> > +		intel_cx0_bus_reset(i915, port, lane);
> > +		goto retry;
> > +	}
> > +
> > +	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
> ~0);
> > +
> > +	return;
> 
> Unnecessary return statement.
Yes.

Thanks for the comments and the review. I will try to address these findings in the next iteration of this patch series.

-Mika-

> 
> > +}
> > +
> > +__maybe_unused static void intel_cx0_rmw(struct drm_i915_private *i915,
> enum port port,
> > +			  int lane, u16 addr, u8 clear, u8 set, bool committed) {
> > +	u8 old, val;
> > +
> > +	old = intel_cx0_read(i915, port, lane, addr);
> > +	val = (old & ~clear) | set;
> > +
> > +	if (val != old)
> > +		intel_cx0_write(i915, port, lane, addr, val, committed); }
> 
> --
> Jani Nikula, Intel Open Source Graphics Center

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus
  2022-09-29 13:17 ` [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus Mika Kahola
  2022-09-30  9:04   ` Jani Nikula
@ 2022-10-11  0:00   ` Lucas De Marchi
  1 sibling, 0 replies; 17+ messages in thread
From: Lucas De Marchi @ 2022-10-11  0:00 UTC (permalink / raw)
  To: Mika Kahola; +Cc: intel-gfx

On Thu, Sep 29, 2022 at 04:17:43PM +0300, Mika Kahola wrote:
>From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
>
>XELPDP has C10 and C20 phys from Synopsys to drive displays. Each phy
>has a dedicated PIPE 5.2 Message bus for configuration. This message
>bus is used to configure the phy internal registers.
>
>Bspec: 64599, 65100, 65101, 67610, 67636
>
>Cc: Mika Kahola <mika.kahola@intel.com>
>Cc: Imre Deak <imre.deak@intel.com>
>Cc: Uma Shankar <uma.shankar@intel.com>
>Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
>Signed-off-by: Mika Kahola <mika.kahola@intel.com> (v4)
>---
> drivers/gpu/drm/i915/display/intel_cx0_phy.c | 179 +++++++++++++++++++
> 1 file changed, 179 insertions(+)
> create mode 100644 drivers/gpu/drm/i915/display/intel_cx0_phy.c
>
>diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
>new file mode 100644
>index 000000000000..7930b0255cfa
>--- /dev/null
>+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
>@@ -0,0 +1,179 @@
>+// SPDX-License-Identifier: MIT
>+/*
>+ * Copyright © 2021 Intel Corporation
>+ */
>+
>+#include "intel_de.h"
>+#include "intel_uncore.h"
>+
>+static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
>+{
>+	enum phy phy = intel_port_to_phy(i915, port);
>+
>+	/* Bring the phy to idle. */
>+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
>+		       XELPDP_PORT_M2P_TRANSACTION_RESET);
>+
>+	/* Wait for Idle Clear. */
>+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
>+				    XELPDP_PORT_M2P_TRANSACTION_RESET,
>+				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
>+		drm_err_once(&i915->drm, "Failed to bring PHY %c to idle. \n", phy_name(phy));
>+		return;
>+	}
>+
>+	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);
>+	return;
>+}
>+
>+__maybe_unused static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
>+			 int lane, u16 addr)
>+{
>+	enum phy phy = intel_port_to_phy(i915, port);
>+	u32 val = 0;
>+	int attempts = 0;
>+
>+retry:
>+	if (attempts == 3) {
>+		drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries. Status: 0x%x\n", phy_name(phy), addr, attempts, val ?: 0);
>+		return 0;
>+	}
>+
>+	/* Wait for pending transactions.*/
>+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
>+				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
>+				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
>+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
>+		attempts++;
>+		intel_cx0_bus_reset(i915, port, lane);
>+		goto retry;
>+	}
>+
>+	/* Issue the read command. */
>+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
>+		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
>+		       XELPDP_PORT_M2P_COMMAND_READ |
>+		       XELPDP_PORT_M2P_ADDRESS(addr));
>+
>+	/* Wait for response ready. And read response.*/
>+	if (__intel_wait_for_register(&i915->uncore,
>+				      XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
>+				      XELPDP_PORT_P2M_RESPONSE_READY,
>+				      XELPDP_PORT_P2M_RESPONSE_READY,
>+				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
>+				      XELPDP_MSGBUS_TIMEOUT_SLOW, &val)) {
>+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Read response ACK. Status: 0x%x\n", phy_name(phy), val);
>+		attempts++;
>+		intel_cx0_bus_reset(i915, port, lane);
>+		goto retry;
>+	}
>+
>+	/* Check for error. */
>+	if (val & XELPDP_PORT_P2M_ERROR_SET) {
>+		drm_dbg(&i915->drm, "PHY %c Error occurred during read command. Status: 0x%x\n", phy_name(phy), val);
>+		attempts++;
>+		intel_cx0_bus_reset(i915, port, lane);
>+		goto retry;
>+	}
>+
>+	/* Check for Read Ack. */
>+	if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val) !=
>+	    XELPDP_PORT_P2M_COMMAND_READ_ACK) {
>+		drm_dbg(&i915->drm, "PHY %c Not a Read response. MSGBUS Status: 0x%x.\n", phy_name(phy), val);
>+		attempts++;
>+		intel_cx0_bus_reset(i915, port, lane);
>+		goto retry;
>+	}
>+
>+	/* Clear Response Ready flag.*/
>+	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);
>+	return (u8)REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
>+}
>+
>+static int intel_cx0_wait_cwrite_ack(struct drm_i915_private *i915,
>+				      enum port port, int lane)
>+{
>+	enum phy phy = intel_port_to_phy(i915, port);
>+	u32 val;
>+
>+	/* Check for write ack. */
>+	if (__intel_wait_for_register(&i915->uncore,
>+				      XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
>+				      XELPDP_PORT_P2M_RESPONSE_READY,
>+				      XELPDP_PORT_P2M_RESPONSE_READY,
>+				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
>+				      XELPDP_MSGBUS_TIMEOUT_SLOW, &val)) {
>+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Committed message ACK. Status: 0x%x\n", phy_name(phy), val);
>+		return -ETIMEDOUT;
>+	}
>+
>+	if ((REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val) !=
>+	     XELPDP_PORT_P2M_COMMAND_WRITE_ACK) || val & XELPDP_PORT_P2M_ERROR_SET) {
>+		drm_dbg(&i915->drm, "PHY %c Unexpected ACK received. MSGBUS STATUS: 0x%x.\n", phy_name(phy), val);
>+		return -EINVAL;
>+	}
>+
>+	return 0;
>+}
>+
>+__maybe_unused static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
>+			    int lane, u16 addr, u8 data, bool committed)
>+{
>+	enum phy phy = intel_port_to_phy(i915, port);
>+	int attempts = 0;
>+
>+retry:
>+	if (attempts == 3) {
>+		drm_err_once(&i915->drm, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, attempts);
>+		return;
>+	}
>+
>+	/* Wait for pending transactions.*/
>+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
>+				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
>+				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
>+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
>+		attempts++;
>+		intel_cx0_bus_reset(i915, port, lane);
>+		goto retry;
>+	}
>+
>+	/* Issue the write command. */
>+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
>+		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
>+		       (committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
>+		       XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
>+		       XELPDP_PORT_M2P_DATA(data) |
>+		       XELPDP_PORT_M2P_ADDRESS(addr));
>+
>+	/* Check for error. */
>+	if (committed) {
>+		if (intel_cx0_wait_cwrite_ack(i915, port, lane) < 0) {
>+			attempts++;
>+			intel_cx0_bus_reset(i915, port, lane);
>+			goto retry;
>+		}
>+	} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(phy, lane)) &


wrong argument here to XELPDP_PORT_P2M_MSGBUS_STATUS(). It should be
port, not phy.
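
i.e. presumably just (untested):

-	} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(phy, lane)) &
+	} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) &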

Lucas De Marchi

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 3/5] drm/i915/mtl: Add support for C10 phy programming
  2022-09-30  9:32   ` Jani Nikula
@ 2022-10-14 12:44     ` Kahola, Mika
  2022-10-18 10:39       ` Jani Nikula
  0 siblings, 1 reply; 17+ messages in thread
From: Kahola, Mika @ 2022-10-14 12:44 UTC (permalink / raw)
  To: Jani Nikula, intel-gfx

> -----Original Message-----
> From: Jani Nikula <jani.nikula@linux.intel.com>
> Sent: Friday, September 30, 2022 12:32 PM
> To: Kahola, Mika <mika.kahola@intel.com>; intel-gfx@lists.freedesktop.org
> Subject: Re: [Intel-gfx] [PATCH 3/5] drm/i915/mtl: Add support for C10 phy
> programming
> 
> On Thu, 29 Sep 2022, Mika Kahola <mika.kahola@intel.com> wrote:
> > From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
> >
> > Add sequences for C10 phy enable/disable phy lane reset, powerdown
> > change sequence and phy lane programming.
> >
> > Bspec: 64539, 67636, 65451, 65450, 64568
> >
> > Cc: Imre Deak <imre.deak@intel.com>
> > Cc: Mika Kahola <mika.kahola@intel.com>
> > Cc: Uma Shankar <uma.shankar@intel.com>
> > Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
> > Signed-off-by: Mika Kahola <mika.kahola@intel.com> (v9)
> > ---
> >  drivers/gpu/drm/i915/Makefile                |   1 +
> >  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 352
> > ++++++++++++++++++-  drivers/gpu/drm/i915/display/intel_cx0_phy.h |  17 +
> >  drivers/gpu/drm/i915/display/intel_ddi.c     |   2 +
> >  drivers/gpu/drm/i915/display/intel_dp.c      |  15 +-
> >  drivers/gpu/drm/i915/display/intel_dpll.c    |   2 +
> >  drivers/gpu/drm/i915/i915_reg.h              | 141 ++++++++
> >  7 files changed, 526 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/Makefile
> > b/drivers/gpu/drm/i915/Makefile index a26edcdadc21..994f87a12782
> > 100644
> > --- a/drivers/gpu/drm/i915/Makefile
> > +++ b/drivers/gpu/drm/i915/Makefile
> > @@ -279,6 +279,7 @@ i915-y += \
> >  	display/icl_dsi.o \
> >  	display/intel_backlight.o \
> >  	display/intel_crt.o \
> > +	display/intel_cx0_phy.o \
> 
> This belongs where intel_cx0_phy.c is added.
> 
> >  	display/intel_ddi.o \
> >  	display/intel_ddi_buf_trans.o \
> >  	display/intel_display_trace.o \
> > diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> > b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> > index 2f401116d1d0..6ba11cd7cd75 100644
> > --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> > +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
> > @@ -526,9 +526,9 @@ void intel_c10mpllb_readout_hw_state(struct
> intel_encoder *encoder,
> >  			 tx0, cmn, phy_name(phy));
> >  }
> >
> > -__maybe_unused static void intel_c10_pll_program(struct drm_i915_private
> *i915,
> > -						 const struct intel_crtc_state
> *crtc_state,
> > -						 struct intel_encoder
> *encoder)
> > +static void intel_c10_pll_program(struct drm_i915_private *i915,
> > +				  const struct intel_crtc_state *crtc_state,
> > +				  struct intel_encoder *encoder)
> >  {
> >  	const struct intel_c10mpllb_state *pll_state = &crtc_state-
> >c10mpllb_state;
> >  	struct intel_digital_port *dig_port = enc_to_dig_port(encoder); @@
> > -633,6 +633,352 @@ int intel_c10mpllb_calc_port_clock(struct intel_encoder
> *encoder,
> >  				     10 << (tx_clk_div + 16));
> >  }
> >
> > +#define PHY_LANES_VAL_ARG(FIELD, lanes, arg)	({u32 __val;
> switch(lanes) {\
> > +						  case
> INTEL_CX0_BOTH_LANES:	\
> > +							__val =
> ((XELPDP_LANE0_##FIELD(arg)) |\
> > +
> (XELPDP_LANE1_##FIELD(arg))); \
> > +							break;
> 		\
> > +						  case INTEL_CX0_LANE0:
> \
> > +							__val =
> (XELPDP_LANE0_##FIELD(arg));\
> > +							break;
> 		\
> > +						  case INTEL_CX0_LANE1:
> \
> > +							__val =
> (XELPDP_LANE1_##FIELD(arg));\
> > +							break;  \
> > +						 }; __val; })
> > +
> > +#define PHY_LANES_VAL(FIELD, lanes)	({u32 __val; switch(lanes) {\
> > +						  case
> INTEL_CX0_BOTH_LANES:	\
> > +							__val =
> (XELPDP_LANE0_##FIELD | \
> > +
> XELPDP_LANE1_##FIELD); \
> > +							break;
> 		\
> > +						  case INTEL_CX0_LANE0:
> \
> > +							__val =
> (XELPDP_LANE0_##FIELD);	     \
> > +							break;
> 		\
> > +						  case INTEL_CX0_LANE1:
> \
> > +							__val =
> (XELPDP_LANE1_##FIELD);\
> > +							break;  \
> > +						 }; __val; })
> 
> Ugh that's ugly. I'll try to look the other way.
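
For what it's worth, the macros only select the per-lane variant of a field:
PHY_LANES_VAL(PIPE_RESET, INTEL_CX0_BOTH_LANES) evaluates to
(XELPDP_LANE0_PIPE_RESET | XELPDP_LANE1_PIPE_RESET), while
PHY_LANES_VAL(PIPE_RESET, INTEL_CX0_LANE1) evaluates to XELPDP_LANE1_PIPE_RESET.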
> 
> > +
> > +static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
> > +					 const struct intel_crtc_state
> *crtc_state,
> > +					 bool lane_reversal)
> > +{
> > +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> > +	struct intel_dp *intel_dp;
> > +	bool ssc_enabled;
> > +	u32 val = 0;
> > +
> > +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port),
> XELPDP_PORT_REVERSAL,
> > +		     lane_reversal ? XELPDP_PORT_REVERSAL : 0);
> > +
> > +	if (lane_reversal)
> > +		val |= XELPDP_LANE1_PHY_CLOCK_SELECT;
> > +
> > +	val |= XELPDP_FORWARD_CLOCK_UNGATE;
> > +	val |=
> XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
> > +
> > +	if (intel_crtc_has_dp_encoder(crtc_state)) {
> > +		intel_dp = enc_to_intel_dp(encoder);
> > +		ssc_enabled = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
> > +			      DP_MAX_DOWNSPREAD_0_5;
> 
> It is almost certainly the wrong thing to do to look at sink DPCD register values
> at the low level PHY code. Smells like something that should be added to crtc
> state.
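
One option would be to precompute this at compute_config time into a new
crtc_state field (field name purely illustrative), e.g.:

	/* in struct intel_crtc_state, filled in from DP_MAX_DOWNSPREAD */
	bool sink_ssc_capable;

so that the PHY code would only look at the crtc state here.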
> 
> > +
> > +		/* TODO: DP2.0 10G and 20G rates enable MPLLA*/
> > +		val |= ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
> > +	}
> > +	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
> > +		     XELPDP_LANE1_PHY_CLOCK_SELECT |
> > +		     XELPDP_FORWARD_CLOCK_UNGATE |
> > +		     XELPDP_DDI_CLOCK_SELECT_MASK |
> > +		     XELPDP_SSC_ENABLE_PLLB, val);
> > +}
> > +
> > +static void intel_cx0_powerdown_change_sequence(struct drm_i915_private
> *i915,
> > +						enum port port,
> > +						enum intel_cx0_lanes lane, u8
> state) {
> > +	enum phy phy = intel_port_to_phy(i915, port);
> > +
> > +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
> > +		     PHY_LANES_VAL(POWERDOWN_NEW_STATE_MASK, lane),
> > +		     PHY_LANES_VAL_ARG(POWERDOWN_NEW_STATE, lane,
> state));
> > +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
> > +		     PHY_LANES_VAL(POWERDOWN_UPDATE, lane),
> > +		     PHY_LANES_VAL(POWERDOWN_UPDATE, lane));
> > +
> > +	/* Update Timeout Value */
> > +	if (__intel_wait_for_register(&i915->uncore,
> XELPDP_PORT_BUF_CTL2(port),
> > +				      PHY_LANES_VAL(POWERDOWN_UPDATE,
> lane), 0,
> > +
> XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
> > +		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane
> reset after %dus.\n",
> > +			 phy_name(phy),
> XELPDP_PORT_RESET_START_TIMEOUT_US);
> > +}
> > +
> > +static void intel_cx0_setup_powerdown(struct drm_i915_private *i915,
> > +enum port port) {
> > +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
> > +		     XELPDP_POWER_STATE_READY_MASK,
> > +		     XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
> > +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(port),
> > +		     XELPDP_POWER_STATE_ACTIVE_MASK |
> > +		     XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
> > +		     XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
> > +		     XELPDP_PLL_LANE_STAGGERING_DELAY(0));
> > +}
> > +
> > +/* FIXME: Some Type-C cases need not reset both the lanes. Handle
> > +those cases. */ static void intel_cx0_phy_lane_reset(struct drm_i915_private
> *i915, enum port port,
> > +				     bool lane_reversal)
> > +{
> > +	enum phy phy = intel_port_to_phy(i915, port);
> > +	enum intel_cx0_lanes lane = lane_reversal ? INTEL_CX0_LANE1 :
> > +				    INTEL_CX0_LANE0;
> > +
> > +	if (__intel_wait_for_register(&i915->uncore,
> XELPDP_PORT_BUF_CTL1(port),
> > +				      XELPDP_PORT_BUF_SOC_PHY_READY,
> > +				      XELPDP_PORT_BUF_SOC_PHY_READY,
> > +
> XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
> > +		drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset
> after %dus.\n",
> > +			 phy_name(phy),
> XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
> > +
> > +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
> > +		     PHY_LANES_VAL(PIPE_RESET, INTEL_CX0_BOTH_LANES),
> > +		     PHY_LANES_VAL(PIPE_RESET, INTEL_CX0_BOTH_LANES));
> > +
> > +	if (__intel_wait_for_register(&i915->uncore,
> XELPDP_PORT_BUF_CTL2(port),
> > +				      PHY_LANES_VAL(PHY_CURRENT_STATUS,
> INTEL_CX0_BOTH_LANES),
> > +				      PHY_LANES_VAL(PHY_CURRENT_STATUS,
> INTEL_CX0_BOTH_LANES),
> > +
> XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
> > +		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane
> reset after %dus.\n",
> > +			 phy_name(phy),
> XELPDP_PORT_RESET_START_TIMEOUT_US);
> > +
> > +	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
> > +		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST, lane),
> > +		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST, lane));
> > +
> > +	if (__intel_wait_for_register(&i915->uncore,
> XELPDP_PORT_CLOCK_CTL(port),
> > +				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane),
> > +				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane),
> > +				      XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0,
> NULL))
> > +		drm_warn(&i915->drm, "PHY %c failed to request refclk after
> %dus.\n",
> > +			 phy_name(phy),
> XELPDP_REFCLK_ENABLE_TIMEOUT_US);
> > +
> > +	intel_cx0_powerdown_change_sequence(i915, port,
> INTEL_CX0_BOTH_LANES,
> > +					    CX0_P2_STATE_RESET);
> > +	intel_cx0_setup_powerdown(i915, port);
> > +
> > +	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
> > +		     PHY_LANES_VAL(PIPE_RESET, INTEL_CX0_BOTH_LANES), 0);
> > +
> > +	if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(port),
> > +				    PHY_LANES_VAL(PHY_CURRENT_STATUS,
> > +						  INTEL_CX0_BOTH_LANES),
> > +				    XELPDP_PORT_RESET_END_TIMEOUT))
> > +		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane
> reset after %dms.\n",
> > +			 phy_name(phy),
> XELPDP_PORT_RESET_END_TIMEOUT); }
> > +
> > +static void intel_c10_program_phy_lane(struct drm_i915_private *i915,
> > +				       enum port port, int lane_count,
> > +				       bool lane_reversal)
> > +{
> > +	u8 l0t1, l0t2, l1t1, l1t2;
> > +
> > +	intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES,
> PHY_C10_VDR_CONTROL(1),
> > +		      C10_VDR_CTRL_MSGBUS_ACCESS,
> C10_VDR_CTRL_MSGBUS_ACCESS,
> > +		      MB_WRITE_COMMITTED);
> > +
> > +	l0t1 = intel_cx0_read(i915, port, 0, PHY_CX0_TX_CONTROL(1, 2));
> > +	l0t2 = intel_cx0_read(i915, port, 0, PHY_CX0_TX_CONTROL(2, 2));
> > +	l1t1 = intel_cx0_read(i915, port, 1, PHY_CX0_TX_CONTROL(1, 2));
> > +	l1t2 = intel_cx0_read(i915, port, 1, PHY_CX0_TX_CONTROL(2, 2));
> > +
> > +	if (lane_reversal) {
> > +		switch (lane_count) {
> > +		case 1:
> > +			/* Disable MLs 1(lane0), 2(lane0), 3(lane1) */
> > +			intel_cx0_write(i915, port, 1,
> PHY_CX0_TX_CONTROL(1, 2),
> > +					l1t1 |
> CONTROL2_DISABLE_SINGLE_TX,
> > +					MB_WRITE_COMMITTED);
> > +			fallthrough;
> > +		case 2:
> > +			/* Disable MLs 1(lane0), 2(lane0) */
> > +			intel_cx0_write(i915, port, 0,
> PHY_CX0_TX_CONTROL(2, 2),
> > +					l0t2 |
> CONTROL2_DISABLE_SINGLE_TX,
> > +					MB_WRITE_COMMITTED);
> > +			fallthrough;
> > +		case 3:
> > +			/* Disable MLs 1(lane0) */
> > +			intel_cx0_write(i915, port, 0,
> PHY_CX0_TX_CONTROL(1, 2),
> > +					l0t1 |
> CONTROL2_DISABLE_SINGLE_TX,
> > +					MB_WRITE_COMMITTED);
> > +			break;
> > +		}
> > +	} else {
> > +		switch (lane_count) {
> > +		case 1:
> > +			/* Disable MLs 2(lane0), 3(lane1), 4(lane1) */
> > +			intel_cx0_write(i915, port, 0,
> PHY_CX0_TX_CONTROL(2, 2),
> > +					l0t2 |
> CONTROL2_DISABLE_SINGLE_TX,
> > +					MB_WRITE_COMMITTED);
> > +			fallthrough;
> > +		case 2:
> > +			/* Disable MLs 3(lane1), 4(lane1) */
> > +			intel_cx0_write(i915, port, 1,
> PHY_CX0_TX_CONTROL(1, 2),
> > +					l1t1 |
> CONTROL2_DISABLE_SINGLE_TX,
> > +					MB_WRITE_COMMITTED);
> > +			fallthrough;
> > +		case 3:
> > +			/* Disable MLs 4(lane1) */
> > +			intel_cx0_write(i915, port, 1,
> PHY_CX0_TX_CONTROL(2, 2),
> > +					l1t2 |
> CONTROL2_DISABLE_SINGLE_TX,
> > +					MB_WRITE_COMMITTED);
> > +			break;
> > +		}
> > +	}
> > +
> > +	intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES,
> PHY_C10_VDR_CONTROL(1),
> > +		      C10_VDR_CTRL_UPDATE_CFG,
> C10_VDR_CTRL_UPDATE_CFG,
> > +MB_WRITE_COMMITTED); }
> > +
> > +static void intel_c10pll_enable(struct intel_encoder *encoder,
> > +				const struct intel_crtc_state *crtc_state) {
> > +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> > +	enum phy phy = intel_port_to_phy(i915, encoder->port);
> > +	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
> > +	bool lane_reversal = dig_port->saved_port_bits &
> DDI_BUF_PORT_REVERSAL;
> > +	enum intel_cx0_lanes maxpclk_lane = lane_reversal ?
> INTEL_CX0_LANE1 :
> > +				    INTEL_CX0_LANE0;
> > +
> > +	/*
> > +	 * 1. Program PORT_CLOCK_CTL REGISTER to configure
> > +	 * clock muxes, gating and SSC
> > +	 */
> > +	intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
> > +
> > +	/* 2. Bring PHY out of reset. */
> > +	intel_cx0_phy_lane_reset(i915, encoder->port, lane_reversal);
> > +
> > +	/*
> > +	 * 3. Change Phy power state to Ready.
> > +	 * TODO: For DP alt mode use only one lane.
> > +	 */
> > +	intel_cx0_powerdown_change_sequence(i915, encoder->port,
> INTEL_CX0_BOTH_LANES,
> > +					    CX0_P2_STATE_READY);
> > +
> > +	/* 4. Program PHY internal PLL internal registers. */
> > +	intel_c10_pll_program(i915, crtc_state, encoder);
> > +
> > +	/*
> > +	 * 5. Program the enabled and disabled owned PHY lane
> > +	 * transmitters over message bus
> > +	 */
> > +	intel_c10_program_phy_lane(i915, encoder->port,
> > +crtc_state->lane_count, lane_reversal);
> > +
> > +	/*
> > +	 * 6. Follow the Display Voltage Frequency Switching - Sequence
> > +	 * Before Frequency Change. We handle this step in bxt_set_cdclk().
> > +	 */
> > +
> > +	/*
> > +	 * 7. Program DDI_CLK_VALFREQ to match intended DDI
> > +	 * clock frequency.
> > +	 */
> > +	intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port),
> > +		       crtc_state->port_clock);
> > +	/*
> > +	 * 8. Set PORT_CLOCK_CTL register PCLK PLL Request
> > +	 * LN<Lane for maxPCLK> to "1" to enable PLL.
> > +	 */
> > +	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), 0,
> > +		     PHY_LANES_VAL(PCLK_PLL_REQUEST, maxpclk_lane));
> > +
> > +	/* 9. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> ==
> "1". */
> > +	if (__intel_wait_for_register(&i915->uncore,
> XELPDP_PORT_CLOCK_CTL(encoder->port),
> > +				      PHY_LANES_VAL(PCLK_PLL_ACK,
> maxpclk_lane),
> > +				      PHY_LANES_VAL(PCLK_PLL_ACK,
> maxpclk_lane),
> > +				      XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US,
> 0, NULL))
> > +		drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n",
> > +			 phy_name(phy),
> XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
> > +
> > +	/*
> > +	 * 10. Follow the Display Voltage Frequency Switching Sequence After
> > +	 * Frequency Change. We handle this step in bxt_set_cdclk().
> > +	 */
> > +}
> > +
> > +void intel_cx0pll_enable(struct intel_encoder *encoder,
> > +			 const struct intel_crtc_state *crtc_state) {
> > +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> > +	enum phy phy = intel_port_to_phy(i915, encoder->port);
> > +
> > +	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
> > +	intel_c10pll_enable(encoder, crtc_state); }
> > +
> > +static void intel_c10pll_disable(struct intel_encoder *encoder) {
> > +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> > +	enum phy phy = intel_port_to_phy(i915, encoder->port);
> > +	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
> > +	bool lane_reversal = dig_port->saved_port_bits &
> DDI_BUF_PORT_REVERSAL;
> > +	enum intel_cx0_lanes lane = lane_reversal ? INTEL_CX0_LANE1 :
> > +				    INTEL_CX0_LANE0;
> > +
> > +	/* 1. Change owned PHY lane power to Disable state. */
> > +	intel_cx0_powerdown_change_sequence(i915, encoder->port,
> INTEL_CX0_BOTH_LANES,
> > +					    CX0_P2PG_STATE_DISABLE);
> > +
> > +	/*
> > +	 * 2. Follow the Display Voltage Frequency Switching Sequence Before
> > +	 * Frequency Change. We handle this step in bxt_set_cdclk().
> > +	 */
> > +
> > +	/*
> > +	 * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for
> maxPCLK>
> > +	 * to "0" to disable PLL.
> > +	 */
> > +	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
> > +		     PHY_LANES_VAL(PCLK_PLL_REQUEST,
> INTEL_CX0_BOTH_LANES) |
> > +		     PHY_LANES_VAL(PCLK_REFCLK_REQUEST,
> INTEL_CX0_BOTH_LANES), 0);
> > +
> > +	/* 4. Program DDI_CLK_VALFREQ to 0. */
> > +	intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0);
> > +
> > +	/*
> > +	 * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**>
> == "0".
> > +	 */
> > +	if (__intel_wait_for_register(&i915->uncore,
> XELPDP_PORT_CLOCK_CTL(encoder->port),
> > +				      PHY_LANES_VAL(PCLK_PLL_ACK, lane) |
> > +				      PHY_LANES_VAL(PCLK_REFCLK_ACK, lane),
> 0,
> > +				      XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US,
> 0, NULL))
> > +		drm_warn(&i915->drm, "Port %c PLL not unlocked after
> %dus.\n",
> > +			 phy_name(phy),
> XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
> > +
> > +	/*
> > +	 * 6. Follow the Display Voltage Frequency Switching Sequence After
> > +	 * Frequency Change. We handle this step in bxt_set_cdclk().
> > +	 */
> > +
> > +	/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
> > +	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
> > +		     XELPDP_DDI_CLOCK_SELECT_MASK |
> > +		     XELPDP_FORWARD_CLOCK_UNGATE, 0); }
> > +
> > +void intel_cx0pll_disable(struct intel_encoder *encoder) {
> > +	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
> > +	enum phy phy = intel_port_to_phy(i915, encoder->port);
> > +
> > +	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
> > +	intel_c10pll_disable(encoder);
> > +}
> > +
> > +#undef PHY_LANES_VAL_ARG
> > +#undef PHY_LANES_VAL
> > +
> >  void intel_c10mpllb_state_verify(struct intel_atomic_state *state,
> >  				 struct intel_crtc_state *new_crtc_state)  { diff
> --git
> > a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
> > b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
> > index cf1f300b6a7b..d12d2e2f02ee 100644
> > --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
> > +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
> > @@ -106,6 +106,19 @@ enum intel_cx0_lanes {
> >  #define  C10_VDR_CTRL_UPDATE_CFG	REG_BIT8(0)
> >  #define PHY_C10_VDR_CUSTOM_WIDTH	0xD02
> >
> > +#define CX0_P0_STATE_ACTIVE		0x0
> > +#define CX0_P2_STATE_READY		0x2
> > +#define CX0_P2PG_STATE_DISABLE		0x9
> > +#define CX0_P4PG_STATE_DISABLE		0xC
> > +#define CX0_P2_STATE_RESET		0x2
> > +
> > +/* PHY_C10_VDR_PLL0 */
> > +#define PLL_C10_MPLL_SSC_EN		REG_BIT8(0)
> > +
> > +/* PIPE SPEC Defined Registers */
> > +#define PHY_CX0_TX_CONTROL(tx, control)	(0x400 + ((tx) - 1) * 0x200 +
> (control))
> > +#define CONTROL2_DISABLE_SINGLE_TX	REG_BIT(6)
> > +
> 
> Again, register definitions don't belong here.
Yes, I will move these to a separate file.

> 
> >  static inline bool intel_is_c10phy(struct drm_i915_private *dev_priv,
> > enum phy phy)  {
> >  	if (!IS_METEORLAKE(dev_priv))
> > @@ -114,6 +127,10 @@ static inline bool intel_is_c10phy(struct
> drm_i915_private *dev_priv, enum phy p
> >  		return (phy < PHY_C);
> >  }
> >
> > +void intel_cx0pll_enable(struct intel_encoder *encoder,
> > +			 const struct intel_crtc_state *crtc_state); void
> > +intel_cx0pll_disable(struct intel_encoder *encoder);
> > +
> >  void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
> >  				     struct intel_c10mpllb_state *pll_state);  int
> > intel_cx0mpllb_calc_state(struct intel_crtc_state *crtc_state, diff
> > --git a/drivers/gpu/drm/i915/display/intel_ddi.c
> > b/drivers/gpu/drm/i915/display/intel_ddi.c
> > index aaa8846c3b18..639ec604babf 100644
> > --- a/drivers/gpu/drm/i915/display/intel_ddi.c
> > +++ b/drivers/gpu/drm/i915/display/intel_ddi.c
> > @@ -4384,6 +4384,8 @@ void intel_ddi_init(struct drm_i915_private
> *dev_priv, enum port port)
> >  	encoder->pipe_mask = ~0;
> >
> >  	if (DISPLAY_VER(dev_priv) >= 14) {
> > +		encoder->enable_clock = intel_cx0pll_enable;
> > +		encoder->disable_clock = intel_cx0pll_disable;
> >  		encoder->get_config = mtl_ddi_get_config;
> >  	} else if (IS_DG2(dev_priv)) {
> >  		encoder->enable_clock = intel_mpllb_enable; diff --git
> > a/drivers/gpu/drm/i915/display/intel_dp.c
> > b/drivers/gpu/drm/i915/display/intel_dp.c
> > index 70b06806ec0d..db32799b5f46 100644
> > --- a/drivers/gpu/drm/i915/display/intel_dp.c
> > +++ b/drivers/gpu/drm/i915/display/intel_dp.c
> > @@ -420,6 +420,11 @@ static int ehl_max_source_rate(struct intel_dp
> *intel_dp)
> >  	return 810000;
> >  }
> >
> > +static int mtl_max_source_rate(struct intel_dp *intel_dp) {
> > +	return intel_dp_is_edp(intel_dp) ? 675000 : 810000; }
> > +
> >  static int vbt_max_link_rate(struct intel_dp *intel_dp)  {
> >  	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; @@
> > -444,6 +449,10 @@ static void  intel_dp_set_source_rates(struct
> > intel_dp *intel_dp)  {
> >  	/* The values must be in increasing order */
> > +	static const int mtl_rates[] = {
> > +		162000, 216000, 243000, 270000, 324000, 432000, 540000,
> 675000,
> > +		810000,
> > +	};
> >  	static const int icl_rates[] = {
> >  		162000, 216000, 270000, 324000, 432000, 540000, 648000,
> 810000,
> >  		1000000, 1350000,
> > @@ -469,7 +478,11 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
> >  	drm_WARN_ON(&dev_priv->drm,
> >  		    intel_dp->source_rates || intel_dp->num_source_rates);
> >
> > -	if (DISPLAY_VER(dev_priv) >= 11) {
> > +	if (DISPLAY_VER(dev_priv) >= 14) {
> > +		source_rates = mtl_rates;
> > +		size = ARRAY_SIZE(mtl_rates);
> > +		max_rate = mtl_max_source_rate(intel_dp);
> > +	} else if (DISPLAY_VER(dev_priv) >= 11) {
> >  		source_rates = icl_rates;
> >  		size = ARRAY_SIZE(icl_rates);
> >  		if (IS_DG2(dev_priv))
> 
> All of the changes to intel_dp.c should be a separate patch.
Ok. I will split this up so that the intel_dp.c changes go into a separate patch.

> 
> > diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c
> > b/drivers/gpu/drm/i915/display/intel_dpll.c
> > index 73f541050913..d6fcdf4eba0e 100644
> > --- a/drivers/gpu/drm/i915/display/intel_dpll.c
> > +++ b/drivers/gpu/drm/i915/display/intel_dpll.c
> > @@ -1533,6 +1533,8 @@ intel_dpll_init_clock_hook(struct
> > drm_i915_private *dev_priv)  {
> >  	if (DISPLAY_VER(dev_priv) >= 14)
> >  		dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
> > +	else if (DISPLAY_VER(dev_priv) >= 14)
> > +		dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
> >  	else if (IS_DG2(dev_priv))
> >  		dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
> >  	else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) diff --git
> > a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> > index 5003a5ffbc6a..5e6ff9f2aa10 100644
> > --- a/drivers/gpu/drm/i915/i915_reg.h
> > +++ b/drivers/gpu/drm/i915/i915_reg.h
> > @@ -2121,6 +2121,11 @@
> >  #define   TRANS_PUSH_EN			REG_BIT(31)
> >  #define   TRANS_PUSH_SEND		REG_BIT(30)
> >
> > +/* DDI Buffer Control */
> > +#define _DDI_CLK_VALFREQ_A		0x64030
> > +#define _DDI_CLK_VALFREQ_B		0x64130
> > +#define DDI_CLK_VALFREQ(port)		_MMIO_PORT(port,
> _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B)
> > +
> >  /*
> >   * HSW+ eDP PSR registers
> >   *
> > @@ -8375,4 +8380,140 @@ enum skl_power_gate {
> >
> >  #define MTL_MEDIA_GSI_BASE		0x380000
> >
> > +#define PUNIT_MMIO_CR_POC_STRAPS	_MMIO(0x281078)
> > +#define   NUM_TILES_MASK		REG_GENMASK(1, 0)
> > +#define   CD_ALIVE			REG_BIT(2)
> > +#define   SOCKET_ID_MASK		REG_GENMASK(7, 3)
> > +
> > +/* Define the BAR and offset for the accelerator fabric CSRs */
> > +#define CD_BASE_OFFSET 0x291000 #define CD_BAR_SIZE (256 * 1024)
> > +
> > +/*
> > + * In general, the i915 should not touch the IAF registers.  The
> > +registers
> > + * will be passed as an IO resource via the MFD interface.  However,
> > +it
> > + * is necessary to put the IRQ bits in a known state, before the MFD
> > +cell
> > + * is registered.
> > + *
> > + * So define these registers for i915 usage.
> 
> These should probably be split to a separate _regs file, like we've been doing for
> other registers. Especially because "In general, the i915 should not touch the IAF
> registers."
Maybe these could be moved into an intel_cx0_reg_defs.h file?

> 
> > + */
> > +#define CPORT_MBDB_CSRS (CD_BASE_OFFSET + 0x6000) #define
> > +CPORT_MBDB_CSRS_END (CPORT_MBDB_CSRS + 0x1000) #define
> > +CPORT_MBDB_INT_ENABLE_MASK _MMIO(CPORT_MBDB_CSRS + 0x8)
> > +
> > +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A		0x64040
> > +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B		0x64140
> > +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1
> 	0x16F240
> > +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2
> 	0x16F440
> > +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC3
> 	0x16F640
> > +#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC4
> 	0x16F840
> > +#define _XELPDP_PORT_M2P_MSGBUS_CTL(port, lane)
> 	(_PICK(port, \
> > +							[PORT_A] =
> _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
> > +							[PORT_B] =
> _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
> > +							[PORT_TC1] =
> _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
> > +							[PORT_TC2] =
> _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2, \
> > +							[PORT_TC3] =
> _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC3, \
> > +							[PORT_TC4] =
> _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC4) + ((lane)
> > +* 4))
> > +
> > +#define XELPDP_PORT_M2P_MSGBUS_CTL(port, lane)
> 	_MMIO(_XELPDP_PORT_M2P_MSGBUS_CTL(port, lane))
> > +#define  XELPDP_PORT_M2P_TRANSACTION_PENDING
> 	REG_BIT(31)
> > +#define  XELPDP_PORT_M2P_COMMAND_TYPE_MASK
> 	REG_GENMASK(30, 27)
> > +#define  XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED
> 	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
> > +#define  XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED
> 	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2)
> > +#define  XELPDP_PORT_M2P_COMMAND_READ
> 	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3)
> > +#define  XELPDP_PORT_M2P_DATA_MASK
> 	REG_GENMASK(23, 16)
> > +#define  XELPDP_PORT_M2P_DATA(val)
> 	REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val)
> > +#define  XELPDP_PORT_M2P_TRANSACTION_RESET		REG_BIT(15)
> > +#define  XELPDP_PORT_M2P_ADDRESS_MASK
> 	REG_GENMASK(11, 0)
> > +#define  XELPDP_PORT_M2P_ADDRESS(val)
> 	REG_FIELD_PREP(XELPDP_PORT_M2P_ADDRESS_MASK, val)
> > +
> > +#define XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)
> 	_MMIO(_XELPDP_PORT_M2P_MSGBUS_CTL(port, lane) + 8)
> > +#define  XELPDP_PORT_P2M_RESPONSE_READY
> 	REG_BIT(31)
> > +#define  XELPDP_PORT_P2M_COMMAND_TYPE_MASK
> 	REG_GENMASK(30, 27)
> > +#define  XELPDP_PORT_P2M_COMMAND_READ_ACK		0x4
> > +#define  XELPDP_PORT_P2M_COMMAND_WRITE_ACK		0x5
> > +#define  XELPDP_PORT_P2M_DATA_MASK
> 	REG_GENMASK(23, 16)
> > +#define  XELPDP_PORT_P2M_DATA(val)
> 	REG_FIELD_PREP(XELPDP_PORT_P2M_DATA_MASK, val)
> > +#define  XELPDP_PORT_P2M_ERROR_SET			REG_BIT(15)
> > +
> > +#define  XELPDP_MSGBUS_TIMEOUT_SLOW			1
> > +#define  XELPDP_MSGBUS_TIMEOUT_FAST_US			2
> > +#define XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US		3200
> > +#define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US		20
> > +#define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US		100
> > +#define XELPDP_PORT_RESET_START_TIMEOUT_US		5
> > +#define XELPDP_PORT_RESET_END_TIMEOUT			15
> > +#define XELPDP_REFCLK_ENABLE_TIMEOUT_US			1
> > +
> > +#define _XELPDP_PORT_BUF_CTL1_LN0_A			0x64004
> > +#define _XELPDP_PORT_BUF_CTL1_LN0_B			0x64104
> > +#define _XELPDP_PORT_BUF_CTL1_LN0_USBC1
> 	0x16F200
> > +#define _XELPDP_PORT_BUF_CTL1_LN0_USBC2
> 	0x16F400
> > +#define _XELPDP_PORT_BUF_CTL1_LN0_USBC3
> 	0x16F600
> > +#define _XELPDP_PORT_BUF_CTL1_LN0_USBC4
> 	0x16F800
> > +#define _XELPDP_PORT_BUF_CTL1(port)			(_PICK(port, \
> > +							[PORT_A] =
> _XELPDP_PORT_BUF_CTL1_LN0_A, \
> > +							[PORT_B] =
> _XELPDP_PORT_BUF_CTL1_LN0_B, \
> > +							[PORT_TC1] =
> _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
> > +							[PORT_TC2] =
> _XELPDP_PORT_BUF_CTL1_LN0_USBC2, \
> > +							[PORT_TC3] =
> _XELPDP_PORT_BUF_CTL1_LN0_USBC3, \
> > +							[PORT_TC4] =
> _XELPDP_PORT_BUF_CTL1_LN0_USBC4))
> > +
> > +#define XELPDP_PORT_BUF_CTL1(port)
> 	_MMIO(_XELPDP_PORT_BUF_CTL1(port))
> > +#define  XELPDP_PORT_BUF_SOC_PHY_READY
> 	REG_BIT(24)
> > +#define  XELPDP_PORT_REVERSAL				REG_BIT(16)
> > +#define  XELPDP_PORT_WIDTH_MASK
> 	REG_GENMASK(3, 1)
> > +#define  XELPDP_PORT_WIDTH(val)
> 	REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, val)
> > +
> > +#define XELPDP_PORT_BUF_CTL2(port)
> 	_MMIO(_XELPDP_PORT_BUF_CTL1(port) + 4)
> > +#define  XELPDP_LANE0_PIPE_RESET			REG_BIT(31)
> > +#define  XELPDP_LANE1_PIPE_RESET			REG_BIT(30)
> > +#define  XELPDP_LANE0_PHY_CURRENT_STATUS		REG_BIT(29)
> > +#define  XELPDP_LANE1_PHY_CURRENT_STATUS		REG_BIT(28)
> > +#define  XELPDP_LANE0_POWERDOWN_UPDATE
> 	REG_BIT(25)
> > +#define  XELPDP_LANE1_POWERDOWN_UPDATE
> 	REG_BIT(24)
> > +#define  XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK
> 	REG_GENMASK(23, 20)
> > +#define  XELPDP_LANE0_POWERDOWN_NEW_STATE(val)
> 	REG_FIELD_PREP(XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK,
> val)
> > +#define  XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK
> 	REG_GENMASK(19, 16)
> > +#define  XELPDP_LANE1_POWERDOWN_NEW_STATE(val)
> 	REG_FIELD_PREP(XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK,
> val)
> > +#define  XELPDP_POWER_STATE_READY_MASK
> 	REG_GENMASK(7, 4)
> > +#define  XELPDP_POWER_STATE_READY(val)
> 	REG_FIELD_PREP(XELPDP_POWER_STATE_READY_MASK, val)
> > +
> > +#define XELPDP_PORT_BUF_CTL3(port)
> 	_MMIO(_XELPDP_PORT_BUF_CTL1(port) + 8)
> > +#define  XELPDP_PLL_LANE_STAGGERING_DELAY_MASK
> 	REG_GENMASK(15, 8)
> > +#define  XELPDP_PLL_LANE_STAGGERING_DELAY(val)
> 	REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
> > +#define  XELPDP_POWER_STATE_ACTIVE_MASK
> 	REG_GENMASK(3, 0)
> > +#define  XELPDP_POWER_STATE_ACTIVE(val)
> 	REG_FIELD_PREP(XELPDP_POWER_STATE_ACTIVE_MASK, val)
> > +
> > +#define _XELPDP_PORT_CLOCK_CTL_A			0x640E0
> > +#define _XELPDP_PORT_CLOCK_CTL_B			0x641E0
> > +#define _XELPDP_PORT_CLOCK_CTL_USBC1			0x16F260
> > +#define _XELPDP_PORT_CLOCK_CTL_USBC2			0x16F460
> > +#define _XELPDP_PORT_CLOCK_CTL_USBC3			0x16F660
> > +#define _XELPDP_PORT_CLOCK_CTL_USBC4			0x16F860
> > +#define XELPDP_PORT_CLOCK_CTL(port)
> 	_MMIO(_PICK(port, \
> > +							[PORT_A] =
> _XELPDP_PORT_CLOCK_CTL_A, \
> > +							[PORT_B] =
> _XELPDP_PORT_CLOCK_CTL_B, \
> > +							[PORT_TC1] =
> _XELPDP_PORT_CLOCK_CTL_USBC1, \
> > +							[PORT_TC2] =
> _XELPDP_PORT_CLOCK_CTL_USBC2, \
> > +							[PORT_TC3] =
> _XELPDP_PORT_CLOCK_CTL_USBC3, \
> > +							[PORT_TC4] =
> _XELPDP_PORT_CLOCK_CTL_USBC4))
> > +
> > +#define XELPDP_LANE0_PCLK_PLL_REQUEST			REG_BIT(31)
> > +#define XELPDP_LANE0_PCLK_PLL_ACK			REG_BIT(30)
> > +#define XELPDP_LANE0_PCLK_REFCLK_REQUEST		REG_BIT(29)
> > +#define XELPDP_LANE0_PCLK_REFCLK_ACK			REG_BIT(28)
> > +#define XELPDP_LANE1_PCLK_PLL_REQUEST			REG_BIT(27)
> > +#define XELPDP_LANE1_PCLK_PLL_ACK			REG_BIT(26)
> > +#define XELPDP_LANE1_PCLK_REFCLK_REQUEST		REG_BIT(25)
> > +#define XELPDP_LANE1_PCLK_REFCLK_ACK			REG_BIT(24)
> > +#define XELPDP_DDI_CLOCK_SELECT_MASK
> 	REG_GENMASK(15, 12)
> > +#define XELPDP_DDI_CLOCK_SELECT(val)
> 	REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
> > +#define XELPDP_DDI_CLOCK_SELECT_NONE			0x0
> > +#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK			0x8
> > +#define XELPDP_FORWARD_CLOCK_UNGATE			REG_BIT(10)
> > +#define XELPDP_LANE1_PHY_CLOCK_SELECT			REG_BIT(8)
> > +#define XELPDP_SSC_ENABLE_PLLA				REG_BIT(1)
> > +#define XELPDP_SSC_ENABLE_PLLB				REG_BIT(0)
> > +
> >  #endif /* _I915_REG_H_ */
> 
> --
> Jani Nikula, Intel Open Source Graphics Center

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 3/5] drm/i915/mtl: Add support for C10 phy programming
  2022-10-14 12:44     ` Kahola, Mika
@ 2022-10-18 10:39       ` Jani Nikula
  0 siblings, 0 replies; 17+ messages in thread
From: Jani Nikula @ 2022-10-18 10:39 UTC (permalink / raw)
  To: Kahola, Mika, intel-gfx

On Fri, 14 Oct 2022, "Kahola, Mika" <mika.kahola@intel.com> wrote:
> Maybe these could be moved into an intel_cx0_reg_defs.h file?

Register definitions to intel_cx0_regs.h. See

$ find drivers/gpu/drm/i915/ -name "*_regs.h"

Any common helpers such as REG_FIELD_GET8() and friends to
i915_reg_defs.h where we already have some other sized helpers.
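
Something along the lines of the existing sized variants, roughly:

#define REG_FIELD_GET8(__mask, __val)	((u8)FIELD_GET(__mask, __val))

with FIELD_GET() coming from <linux/bitfield.h>, which i915_reg_defs.h
already uses.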

BR,
Jani.


-- 
Jani Nikula, Intel Open Source Graphics Center

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus
  2022-09-29 11:16 [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 support Mika Kahola
@ 2022-09-29 11:16 ` Mika Kahola
  0 siblings, 0 replies; 17+ messages in thread
From: Mika Kahola @ 2022-09-29 11:16 UTC (permalink / raw)
  To: intel-gfx

From: Radhakrishna Sripada <radhakrishna.sripada@intel.com>

XELPDP has C10 and C20 phys from Synopsys to drive displays. Each phy
has a dedicated PIPE 5.2 Message bus for configuration. This message
bus is used to configure the phy internal registers.

Bspec: 64599, 65100, 65101, 67610, 67636

Cc: Mika Kahola <mika.kahola@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Uma Shankar <uma.shankar@intel.com>
Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
Signed-off-by: Mika Kahola <mika.kahola@intel.com> (v4)
---
 drivers/gpu/drm/i915/Makefile                |    1 +
 drivers/gpu/drm/i915/display/intel_cx0_phy.c |  179 +
 drivers/gpu/drm/i915/i915_reg.h              |   59 +
 drivers/gpu/drm/i915/intel_pm.c              | 5066 ------------------
 4 files changed, 239 insertions(+), 5066 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/display/intel_cx0_phy.c
 delete mode 100644 drivers/gpu/drm/i915/intel_pm.c

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a26edcdadc21..994f87a12782 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -279,6 +279,7 @@ i915-y += \
 	display/icl_dsi.o \
 	display/intel_backlight.o \
 	display/intel_crt.o \
+	display/intel_cx0_phy.o \
 	display/intel_ddi.o \
 	display/intel_ddi_buf_trans.o \
 	display/intel_display_trace.o \
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
new file mode 100644
index 000000000000..7930b0255cfa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "intel_de.h"
+#include "intel_uncore.h"
+
+static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
+{
+	enum phy phy = intel_port_to_phy(i915, port);
+
+	/* Bring the phy to idle. */
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+		       XELPDP_PORT_M2P_TRANSACTION_RESET);
+
+	/* Wait for Idle Clear. */
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+				    XELPDP_PORT_M2P_TRANSACTION_RESET,
+				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+		drm_err_once(&i915->drm, "Failed to bring PHY %c to idle. \n", phy_name(phy));
+		return;
+	}
+
+	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);
+	return;
+}
+
+__maybe_unused static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
+			 int lane, u16 addr)
+{
+	enum phy phy = intel_port_to_phy(i915, port);
+	u32 val = 0;
+	int attempts = 0;
+
+retry:
+	if (attempts == 3) {
+		drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries. Status: 0x%x\n", phy_name(phy), addr, attempts, val ?: 0);
+		return 0;
+	}
+
+	/* Wait for pending transactions.*/
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
+				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	/* Issue the read command. */
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
+		       XELPDP_PORT_M2P_COMMAND_READ |
+		       XELPDP_PORT_M2P_ADDRESS(addr));
+
+	/* Wait for response ready. And read response.*/
+	if (__intel_wait_for_register(&i915->uncore,
+				      XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+				      XELPDP_PORT_P2M_RESPONSE_READY,
+				      XELPDP_PORT_P2M_RESPONSE_READY,
+				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
+				      XELPDP_MSGBUS_TIMEOUT_SLOW, &val)) {
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Read response ACK. Status: 0x%x\n", phy_name(phy), val);
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	/* Check for error. */
+	if (val & XELPDP_PORT_P2M_ERROR_SET) {
+		drm_dbg(&i915->drm, "PHY %c Error occurred during read command. Status: 0x%x\n", phy_name(phy), val);
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	/* Check for Read Ack. */
+	if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val) !=
+	    XELPDP_PORT_P2M_COMMAND_READ_ACK) {
+		drm_dbg(&i915->drm, "PHY %c Not a Read response. MSGBUS Status: 0x%x.\n", phy_name(phy), val);
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	/* Clear Response Ready flag.*/
+	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);
+	return (u8)REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
+}
+
+static int intel_cx0_wait_cwrite_ack(struct drm_i915_private *i915,
+				      enum port port, int lane)
+{
+	enum phy phy = intel_port_to_phy(i915, port);
+	u32 val;
+
+	/* Check for write ack. */
+	if (__intel_wait_for_register(&i915->uncore,
+				      XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+				      XELPDP_PORT_P2M_RESPONSE_READY,
+				      XELPDP_PORT_P2M_RESPONSE_READY,
+				      XELPDP_MSGBUS_TIMEOUT_FAST_US,
+				      XELPDP_MSGBUS_TIMEOUT_SLOW, &val)) {
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for Committed message ACK. Status: 0x%x\n", phy_name(phy), val);
+		return -ETIMEDOUT;
+	}
+
+	if ((REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val) !=
+	     XELPDP_PORT_P2M_COMMAND_WRITE_ACK) || val & XELPDP_PORT_P2M_ERROR_SET) {
+		drm_dbg(&i915->drm, "PHY %c Unexpected ACK received. MSGBUS STATUS: 0x%x.\n", phy_name(phy), val);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+__maybe_unused static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
+			    int lane, u16 addr, u8 data, bool committed)
+{
+	enum phy phy = intel_port_to_phy(i915, port);
+	int attempts = 0;
+
+retry:
+	if (attempts == 3) {
+		drm_err_once(&i915->drm, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, attempts);
+		return;
+	}
+
+	/* Wait for pending transactions.*/
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
+				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+		drm_dbg(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	/* Issue the write command. */
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
+		       (committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
+		       XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
+		       XELPDP_PORT_M2P_DATA(data) |
+		       XELPDP_PORT_M2P_ADDRESS(addr));
+
+	/* Check for error. */
+	if (committed) {
+		if (intel_cx0_wait_cwrite_ack(i915, port, lane) < 0) {
+			attempts++;
+			intel_cx0_bus_reset(i915, port, lane);
+			goto retry;
+		}
+	} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(phy, lane)) &
+			    XELPDP_PORT_P2M_ERROR_SET)) {
+		drm_dbg(&i915->drm, "PHY %c Error occurred during write command.\n", phy_name(phy));
+		attempts++;
+		intel_cx0_bus_reset(i915, port, lane);
+		goto retry;
+	}
+
+	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane), ~0);
+
+	return;
+}
+
+__maybe_unused static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+			  int lane, u16 addr, u8 clear, u8 set, bool committed)
+{
+	u8 old, val;
+
+	old = intel_cx0_read(i915, port, lane, addr);
+	val = (old & ~clear) | set;
+
+	if (val != old)
+		intel_cx0_write(i915, port, lane, addr, val, committed);
+}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5003a5ffbc6a..9b98d37de95c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8375,4 +8375,63 @@ enum skl_power_gate {
 
 #define MTL_MEDIA_GSI_BASE		0x380000
 
+#define PUNIT_MMIO_CR_POC_STRAPS	_MMIO(0x281078)
+#define   NUM_TILES_MASK		REG_GENMASK(1, 0)
+#define   CD_ALIVE			REG_BIT(2)
+#define   SOCKET_ID_MASK		REG_GENMASK(7, 3)
+
+/* Define the BAR and offset for the accelerator fabric CSRs */
+#define CD_BASE_OFFSET 0x291000
+#define CD_BAR_SIZE (256 * 1024)
+
+/*
+ * In general, the i915 should not touch the IAF registers.  The registers
+ * will be passed as an IO resource via the MFD interface.  However, it
+ * is necessary to put the IRQ bits in a known state, before the MFD cell
+ * is registered.
+ *
+ * So define these registers for i915 usage.
+ */
+#define CPORT_MBDB_CSRS (CD_BASE_OFFSET + 0x6000)
+#define CPORT_MBDB_CSRS_END (CPORT_MBDB_CSRS + 0x1000)
+#define CPORT_MBDB_INT_ENABLE_MASK _MMIO(CPORT_MBDB_CSRS + 0x8)
+
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A		0x64040
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B		0x64140
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1		0x16F240
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2		0x16F440
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC3		0x16F640
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC4		0x16F840
+#define _XELPDP_PORT_M2P_MSGBUS_CTL(port, lane)		(_PICK(port, \
+							[PORT_A] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
+							[PORT_B] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
+							[PORT_TC1] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
+							[PORT_TC2] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2, \
+							[PORT_TC3] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC3, \
+							[PORT_TC4] = _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC4) + ((lane) * 4))
+
+#define XELPDP_PORT_M2P_MSGBUS_CTL(port, lane)		_MMIO(_XELPDP_PORT_M2P_MSGBUS_CTL(port, lane))
+#define  XELPDP_PORT_M2P_TRANSACTION_PENDING		REG_BIT(31)
+#define  XELPDP_PORT_M2P_COMMAND_TYPE_MASK		REG_GENMASK(30, 27)
+#define  XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
+#define  XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED	REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2)
+#define  XELPDP_PORT_M2P_COMMAND_READ			REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3)
+#define  XELPDP_PORT_M2P_DATA_MASK			REG_GENMASK(23, 16)
+#define  XELPDP_PORT_M2P_DATA(val)			REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val)
+#define  XELPDP_PORT_M2P_TRANSACTION_RESET		REG_BIT(15)
+#define  XELPDP_PORT_M2P_ADDRESS_MASK			REG_GENMASK(11, 0)
+#define  XELPDP_PORT_M2P_ADDRESS(val)			REG_FIELD_PREP(XELPDP_PORT_M2P_ADDRESS_MASK, val)
+
+#define XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)	_MMIO(_XELPDP_PORT_M2P_MSGBUS_CTL(port, lane) + 8)
+#define  XELPDP_PORT_P2M_RESPONSE_READY			REG_BIT(31)
+#define  XELPDP_PORT_P2M_COMMAND_TYPE_MASK		REG_GENMASK(30, 27)
+#define  XELPDP_PORT_P2M_COMMAND_READ_ACK		0x4
+#define  XELPDP_PORT_P2M_COMMAND_WRITE_ACK		0x5
+#define  XELPDP_PORT_P2M_DATA_MASK			REG_GENMASK(23, 16)
+#define  XELPDP_PORT_P2M_DATA(val)			REG_FIELD_PREP(XELPDP_PORT_P2M_DATA_MASK, val)
+#define  XELPDP_PORT_P2M_ERROR_SET			REG_BIT(15)
+
+#define  XELPDP_MSGBUS_TIMEOUT_SLOW			1 /* ms */
+#define  XELPDP_MSGBUS_TIMEOUT_FAST_US			2
+
 #endif /* _I915_REG_H_ */
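
For illustration, the M2P field macros above compose a single message bus
command dword as follows; the address 0xd00 and payload 0x5a are arbitrary
example values, not taken from the C10 register map.

	u32 cmd = XELPDP_PORT_M2P_TRANSACTION_PENDING |		/* bit 31 */
		  XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED |	/* 0x2 in bits 30:27 */
		  XELPDP_PORT_M2P_DATA(0x5a) |			/* payload in bits 23:16 */
		  XELPDP_PORT_M2P_ADDRESS(0xd00);		/* address in bits 11:0 */
	/* cmd == 0x905a0d00 */
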
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
deleted file mode 100644
index 2595ec5aeb77..000000000000
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ /dev/null
@@ -1,5066 +0,0 @@
-/*
- * Copyright © 2012 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Eugeni Dodonov <eugeni.dodonov@intel.com>
- *
- */
-
-#include "display/intel_de.h"
-#include "display/intel_display_trace.h"
-#include "display/skl_watermark.h"
-
-#include "gt/intel_engine_regs.h"
-#include "gt/intel_gt_regs.h"
-
-#include "i915_drv.h"
-#include "intel_mchbar_regs.h"
-#include "intel_pm.h"
-#include "vlv_sideband.h"
-
-struct drm_i915_clock_gating_funcs {
-	void (*init_clock_gating)(struct drm_i915_private *i915);
-};
-
-/* used in computing the new watermarks state */
-struct intel_wm_config {
-	unsigned int num_pipes_active;
-	bool sprites_enabled;
-	bool sprites_scaled;
-};
-
-static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	if (HAS_LLC(dev_priv)) {
-		/*
-		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
-		 * Display WA #0390: skl,kbl
-		 *
-		 * Must match Sampler, Pixel Back End, and Media. See
-		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
-		 */
-		intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
-			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) |
-			   SKL_DE_COMPRESSED_HASH_MODE);
-	}
-
-	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
-	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
-		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
-
-	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
-	intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
-		   intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
-
-	/*
-	 * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
-	 * Display WA #0859: skl,bxt,kbl,glk,cfl
-	 */
-	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
-		   DISP_FBC_MEMORY_WAKE);
-}
-
-static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	gen9_init_clock_gating(dev_priv);
-
-	/* WaDisableSDEUnitClockGating:bxt */
-	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
-		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
-	/*
-	 * FIXME:
-	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
-	 */
-	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
-		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
-
-	/*
-	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
-	 * to stay fully on.
-	 */
-	intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
-		   PWM1_GATING_DIS | PWM2_GATING_DIS);
-
-	/*
-	 * Lower the display internal timeout.
-	 * This is needed to avoid any hard hangs when DSI port PLL
-	 * is off and a MMIO access is attempted by any privilege
-	 * application, using batch buffers or any other means.
-	 */
-	intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950));
-
-	/*
-	 * WaFbcTurnOffFbcWatermark:bxt
-	 * Display WA #0562: bxt
-	 */
-	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
-		   DISP_FBC_WM_DIS);
-
-	/*
-	 * WaFbcHighMemBwCorruptionAvoidance:bxt
-	 * Display WA #0883: bxt
-	 */
-	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
-			   intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
-			   DPFC_DISABLE_DUMMY0);
-}
-
-static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	gen9_init_clock_gating(dev_priv);
-
-	/*
-	 * WaDisablePWMClockGating:glk
-	 * Backlight PWM may stop in the asserted state, causing backlight
-	 * to stay fully on.
-	 */
-	intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
-		   PWM1_GATING_DIS | PWM2_GATING_DIS);
-}
-
-static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
-{
-	u32 tmp;
-
-	tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
-
-	switch (tmp & CLKCFG_FSB_MASK) {
-	case CLKCFG_FSB_533:
-		dev_priv->fsb_freq = 533; /* 133*4 */
-		break;
-	case CLKCFG_FSB_800:
-		dev_priv->fsb_freq = 800; /* 200*4 */
-		break;
-	case CLKCFG_FSB_667:
-		dev_priv->fsb_freq =  667; /* 167*4 */
-		break;
-	case CLKCFG_FSB_400:
-		dev_priv->fsb_freq = 400; /* 100*4 */
-		break;
-	}
-
-	switch (tmp & CLKCFG_MEM_MASK) {
-	case CLKCFG_MEM_533:
-		dev_priv->mem_freq = 533;
-		break;
-	case CLKCFG_MEM_667:
-		dev_priv->mem_freq = 667;
-		break;
-	case CLKCFG_MEM_800:
-		dev_priv->mem_freq = 800;
-		break;
-	}
-
-	/* detect pineview DDR3 setting */
-	tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
-	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
-}
-
-static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
-{
-	u16 ddrpll, csipll;
-
-	ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
-	csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
-
-	switch (ddrpll & 0xff) {
-	case 0xc:
-		dev_priv->mem_freq = 800;
-		break;
-	case 0x10:
-		dev_priv->mem_freq = 1066;
-		break;
-	case 0x14:
-		dev_priv->mem_freq = 1333;
-		break;
-	case 0x18:
-		dev_priv->mem_freq = 1600;
-		break;
-	default:
-		drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
-			ddrpll & 0xff);
-		dev_priv->mem_freq = 0;
-		break;
-	}
-
-	switch (csipll & 0x3ff) {
-	case 0x00c:
-		dev_priv->fsb_freq = 3200;
-		break;
-	case 0x00e:
-		dev_priv->fsb_freq = 3733;
-		break;
-	case 0x010:
-		dev_priv->fsb_freq = 4266;
-		break;
-	case 0x012:
-		dev_priv->fsb_freq = 4800;
-		break;
-	case 0x014:
-		dev_priv->fsb_freq = 5333;
-		break;
-	case 0x016:
-		dev_priv->fsb_freq = 5866;
-		break;
-	case 0x018:
-		dev_priv->fsb_freq = 6400;
-		break;
-	default:
-		drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
-			csipll & 0x3ff);
-		dev_priv->fsb_freq = 0;
-		break;
-	}
-}
-
-static const struct cxsr_latency cxsr_latency_table[] = {
-	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
-	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
-	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
-	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
-	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
-
-	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
-	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
-	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
-	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
-	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
-
-	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
-	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
-	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
-	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
-	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
-
-	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
-	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
-	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
-	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
-	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
-
-	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
-	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
-	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
-	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
-	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
-
-	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
-	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
-	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
-	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
-	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
-};
-
-static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
-							 bool is_ddr3,
-							 int fsb,
-							 int mem)
-{
-	const struct cxsr_latency *latency;
-	int i;
-
-	if (fsb == 0 || mem == 0)
-		return NULL;
-
-	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
-		latency = &cxsr_latency_table[i];
-		if (is_desktop == latency->is_desktop &&
-		    is_ddr3 == latency->is_ddr3 &&
-		    fsb == latency->fsb_freq && mem == latency->mem_freq)
-			return latency;
-	}
-
-	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-
-	return NULL;
-}
-
-static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
-{
-	u32 val;
-
-	vlv_punit_get(dev_priv);
-
-	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
-	if (enable)
-		val &= ~FORCE_DDR_HIGH_FREQ;
-	else
-		val |= FORCE_DDR_HIGH_FREQ;
-	val &= ~FORCE_DDR_LOW_FREQ;
-	val |= FORCE_DDR_FREQ_REQ_ACK;
-	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
-
-	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
-		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
-		drm_err(&dev_priv->drm,
-			"timed out waiting for Punit DDR DVFS request\n");
-
-	vlv_punit_put(dev_priv);
-}
-
-static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
-{
-	u32 val;
-
-	vlv_punit_get(dev_priv);
-
-	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
-	if (enable)
-		val |= DSP_MAXFIFO_PM5_ENABLE;
-	else
-		val &= ~DSP_MAXFIFO_PM5_ENABLE;
-	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
-
-	vlv_punit_put(dev_priv);
-}
-
-#define FW_WM(value, plane) \
-	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
-{
-	bool was_enabled;
-	u32 val;
-
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
-		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
-		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
-	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
-		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
-		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
-		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
-	} else if (IS_PINEVIEW(dev_priv)) {
-		val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
-		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
-		if (enable)
-			val |= PINEVIEW_SELF_REFRESH_EN;
-		else
-			val &= ~PINEVIEW_SELF_REFRESH_EN;
-		intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
-		intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
-	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
-		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
-		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
-			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
-		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
-		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
-	} else if (IS_I915GM(dev_priv)) {
-		/*
-		 * FIXME can't find a bit like this for 915G, and
-		 * and yet it does have the related watermark in
-		 * FW_BLC_SELF. What's going on?
-		 */
-		was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
-		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
-			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
-		intel_uncore_write(&dev_priv->uncore, INSTPM, val);
-		intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
-	} else {
-		return false;
-	}
-
-	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
-
-	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
-		    str_enabled_disabled(enable),
-		    str_enabled_disabled(was_enabled));
-
-	return was_enabled;
-}
-
-/**
- * intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
- * @enable: Allow vs. disallow CxSR
- *
- * Allow or disallow the system to enter a special CxSR
- * (C-state self refresh) state. What typically happens in CxSR mode
- * is that several display FIFOs may get combined into a single larger
- * FIFO for a particular plane (so called max FIFO mode) to allow the
- * system to defer memory fetches longer, and the memory will enter
- * self refresh.
- *
- * Note that enabling CxSR does not guarantee that the system enter
- * this special mode, nor does it guarantee that the system stays
- * in that mode once entered. So this just allows/disallows the system
- * to autonomously utilize the CxSR mode. Other factors such as core
- * C-states will affect when/if the system actually enters/exits the
- * CxSR mode.
- *
- * Note that on VLV/CHV this actually only controls the max FIFO mode,
- * and the system is free to enter/exit memory self refresh at any time
- * even when the use of CxSR has been disallowed.
- *
- * While the system is actually in the CxSR/max FIFO mode, some plane
- * control registers will not get latched on vblank. Thus in order to
- * guarantee the system will respond to changes in the plane registers
- * we must always disallow CxSR prior to making changes to those registers.
- * Unfortunately the system will re-evaluate the CxSR conditions at
- * frame start which happens after vblank start (which is when the plane
- * registers would get latched), so we can't proceed with the plane update
- * during the same frame where we disallowed CxSR.
- *
- * Certain platforms also have a deeper HPLL SR mode. Fortunately the
- * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
- * the hardware w.r.t. HPLL SR when writing to plane registers.
- * Disallowing just CxSR is sufficient.
- */
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
-{
-	bool ret;
-
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
-	ret = _intel_set_memory_cxsr(dev_priv, enable);
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		dev_priv->display.wm.vlv.cxsr = enable;
-	else if (IS_G4X(dev_priv))
-		dev_priv->display.wm.g4x.cxsr = enable;
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
-
-	return ret;
-}
-
-/*
- * Latency for FIFO fetches is dependent on several factors:
- *   - memory configuration (speed, channels)
- *   - chipset
- *   - current MCH state
- * It can be fairly high in some situations, so here we assume a fairly
- * pessimal value.  It's a tradeoff between extra memory fetches (if we
- * set this value too high, the FIFO will fetch frequently to stay full)
- * and power consumption (set it too low to save power and we might see
- * FIFO underruns and display "flicker").
- *
- * A value of 5us seems to be a good balance; safe for very low end
- * platforms but not overly aggressive on lower latency configs.
- */
-static const int pessimal_latency_ns = 5000;
-
-#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
-	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
-
-static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
-	enum pipe pipe = crtc->pipe;
-	int sprite0_start, sprite1_start;
-	u32 dsparb, dsparb2, dsparb3;
-
-	switch (pipe) {
-	case PIPE_A:
-		dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
-		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
-		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
-		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
-		break;
-	case PIPE_B:
-		dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
-		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
-		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
-		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
-		break;
-	case PIPE_C:
-		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
-		dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
-		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
-		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
-		break;
-	default:
-		MISSING_CASE(pipe);
-		return;
-	}
-
-	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
-	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
-	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
-	fifo_state->plane[PLANE_CURSOR] = 63;
-}
-
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
-			      enum i9xx_plane_id i9xx_plane)
-{
-	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
-	int size;
-
-	size = dsparb & 0x7f;
-	if (i9xx_plane == PLANE_B)
-		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
-
-	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
-		    dsparb, plane_name(i9xx_plane), size);
-
-	return size;
-}
-
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
-			      enum i9xx_plane_id i9xx_plane)
-{
-	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
-	int size;
-
-	size = dsparb & 0x1ff;
-	if (i9xx_plane == PLANE_B)
-		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
-	size >>= 1; /* Convert to cachelines */
-
-	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
-		    dsparb, plane_name(i9xx_plane), size);
-
-	return size;
-}
-
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
-			      enum i9xx_plane_id i9xx_plane)
-{
-	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
-	int size;
-
-	size = dsparb & 0x7f;
-	size >>= 2; /* Convert to cachelines */
-
-	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
-		    dsparb, plane_name(i9xx_plane), size);
-
-	return size;
-}
-
-/* Pineview has different values for various configs */
-static const struct intel_watermark_params pnv_display_wm = {
-	.fifo_size = PINEVIEW_DISPLAY_FIFO,
-	.max_wm = PINEVIEW_MAX_WM,
-	.default_wm = PINEVIEW_DFT_WM,
-	.guard_size = PINEVIEW_GUARD_WM,
-	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_display_hplloff_wm = {
-	.fifo_size = PINEVIEW_DISPLAY_FIFO,
-	.max_wm = PINEVIEW_MAX_WM,
-	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
-	.guard_size = PINEVIEW_GUARD_WM,
-	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_cursor_wm = {
-	.fifo_size = PINEVIEW_CURSOR_FIFO,
-	.max_wm = PINEVIEW_CURSOR_MAX_WM,
-	.default_wm = PINEVIEW_CURSOR_DFT_WM,
-	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
-	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
-	.fifo_size = PINEVIEW_CURSOR_FIFO,
-	.max_wm = PINEVIEW_CURSOR_MAX_WM,
-	.default_wm = PINEVIEW_CURSOR_DFT_WM,
-	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
-	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i965_cursor_wm_info = {
-	.fifo_size = I965_CURSOR_FIFO,
-	.max_wm = I965_CURSOR_MAX_WM,
-	.default_wm = I965_CURSOR_DFT_WM,
-	.guard_size = 2,
-	.cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i945_wm_info = {
-	.fifo_size = I945_FIFO_SIZE,
-	.max_wm = I915_MAX_WM,
-	.default_wm = 1,
-	.guard_size = 2,
-	.cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i915_wm_info = {
-	.fifo_size = I915_FIFO_SIZE,
-	.max_wm = I915_MAX_WM,
-	.default_wm = 1,
-	.guard_size = 2,
-	.cacheline_size = I915_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i830_a_wm_info = {
-	.fifo_size = I855GM_FIFO_SIZE,
-	.max_wm = I915_MAX_WM,
-	.default_wm = 1,
-	.guard_size = 2,
-	.cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i830_bc_wm_info = {
-	.fifo_size = I855GM_FIFO_SIZE,
-	.max_wm = I915_MAX_WM/2,
-	.default_wm = 1,
-	.guard_size = 2,
-	.cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-static const struct intel_watermark_params i845_wm_info = {
-	.fifo_size = I830_FIFO_SIZE,
-	.max_wm = I915_MAX_WM,
-	.default_wm = 1,
-	.guard_size = 2,
-	.cacheline_size = I830_FIFO_LINE_SIZE,
-};
-
-/**
- * intel_wm_method1 - Method 1 / "small buffer" watermark formula
- * @pixel_rate: Pipe pixel rate in kHz
- * @cpp: Plane bytes per pixel
- * @latency: Memory wakeup latency in 0.1us units
- *
- * Compute the watermark using the method 1 or "small buffer"
- * formula. The caller may additonally add extra cachelines
- * to account for TLB misses and clock crossings.
- *
- * This method is concerned with the short term drain rate
- * of the FIFO, ie. it does not account for blanking periods
- * which would effectively reduce the average drain rate across
- * a longer period. The name "small" refers to the fact the
- * FIFO is relatively small compared to the amount of data
- * fetched.
- *
- * The FIFO level vs. time graph might look something like:
- *
- *   |\   |\
- *   | \  | \
- * __---__---__ (- plane active, _ blanking)
- * -> time
- *
- * or perhaps like this:
- *
- *   |\|\  |\|\
- * __----__----__ (- plane active, _ blanking)
- * -> time
- *
- * Returns:
- * The watermark in bytes
- */
-static unsigned int intel_wm_method1(unsigned int pixel_rate,
-				     unsigned int cpp,
-				     unsigned int latency)
-{
-	u64 ret;
-
-	ret = mul_u32_u32(pixel_rate, cpp * latency);
-	ret = DIV_ROUND_UP_ULL(ret, 10000);
-
-	return ret;
-}
-
-/**
- * intel_wm_method2 - Method 2 / "large buffer" watermark formula
- * @pixel_rate: Pipe pixel rate in kHz
- * @htotal: Pipe horizontal total
- * @width: Plane width in pixels
- * @cpp: Plane bytes per pixel
- * @latency: Memory wakeup latency in 0.1us units
- *
- * Compute the watermark using the method 2 or "large buffer"
- * formula. The caller may additonally add extra cachelines
- * to account for TLB misses and clock crossings.
- *
- * This method is concerned with the long term drain rate
- * of the FIFO, ie. it does account for blanking periods
- * which effectively reduce the average drain rate across
- * a longer period. The name "large" refers to the fact the
- * FIFO is relatively large compared to the amount of data
- * fetched.
- *
- * The FIFO level vs. time graph might look something like:
- *
- *    |\___       |\___
- *    |    \___   |    \___
- *    |        \  |        \
- * __ --__--__--__--__--__--__ (- plane active, _ blanking)
- * -> time
- *
- * Returns:
- * The watermark in bytes
- */
-static unsigned int intel_wm_method2(unsigned int pixel_rate,
-				     unsigned int htotal,
-				     unsigned int width,
-				     unsigned int cpp,
-				     unsigned int latency)
-{
-	unsigned int ret;
-
-	/*
-	 * FIXME remove once all users are computing
-	 * watermarks in the correct place.
-	 */
-	if (WARN_ON_ONCE(htotal == 0))
-		htotal = 1;
-
-	ret = (latency * pixel_rate) / (htotal * 10000);
-	ret = (ret + 1) * width * cpp;
-
-	return ret;
-}
-
-/**
- * intel_calculate_wm - calculate watermark level
- * @pixel_rate: pixel clock
- * @wm: chip FIFO params
- * @fifo_size: size of the FIFO buffer
- * @cpp: bytes per pixel
- * @latency_ns: memory latency for the platform
- *
- * Calculate the watermark level (the level at which the display plane will
- * start fetching from memory again).  Each chip has a different display
- * FIFO size and allocation, so the caller needs to figure that out and pass
- * in the correct intel_watermark_params structure.
- *
- * As the pixel clock runs, the FIFO will be drained at a rate that depends
- * on the pixel size.  When it reaches the watermark level, it'll start
- * fetching FIFO line sized based chunks from memory until the FIFO fills
- * past the watermark point.  If the FIFO drains completely, a FIFO underrun
- * will occur, and a display engine hang could result.
- */
-static unsigned int intel_calculate_wm(int pixel_rate,
-				       const struct intel_watermark_params *wm,
-				       int fifo_size, int cpp,
-				       unsigned int latency_ns)
-{
-	int entries, wm_size;
-
-	/*
-	 * Note: we need to make sure we don't overflow for various clock &
-	 * latency values.
-	 * clocks go from a few thousand to several hundred thousand.
-	 * latency is usually a few thousand
-	 */
-	entries = intel_wm_method1(pixel_rate, cpp,
-				   latency_ns / 100);
-	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
-		wm->guard_size;
-	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
-
-	wm_size = fifo_size - entries;
-	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
-
-	/* Don't promote wm_size to unsigned... */
-	if (wm_size > wm->max_wm)
-		wm_size = wm->max_wm;
-	if (wm_size <= 0)
-		wm_size = wm->default_wm;
-
-	/*
-	 * Bspec seems to indicate that the value shouldn't be lower than
-	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
-	 * Lets go for 8 which is the burst size since certain platforms
-	 * already use a hardcoded 8 (which is what the spec says should be
-	 * done).
-	 */
-	if (wm_size <= 8)
-		wm_size = 8;
-
-	return wm_size;
-}
-
-static bool is_disabling(int old, int new, int threshold)
-{
-	return old >= threshold && new < threshold;
-}
-
-static bool is_enabling(int old, int new, int threshold)
-{
-	return old < threshold && new >= threshold;
-}
-
-static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
-{
-	return dev_priv->display.wm.max_level + 1;
-}
-
-bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
-			    const struct intel_plane_state *plane_state)
-{
-	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-
-	/* FIXME check the 'enable' instead */
-	if (!crtc_state->hw.active)
-		return false;
-
-	/*
-	 * Treat cursor with fb as always visible since cursor updates
-	 * can happen faster than the vrefresh rate, and the current
-	 * watermark code doesn't handle that correctly. Cursor updates
-	 * which set/clear the fb or change the cursor size are going
-	 * to get throttled by intel_legacy_cursor_update() to work
-	 * around this problem with the watermark code.
-	 */
-	if (plane->id == PLANE_CURSOR)
-		return plane_state->hw.fb != NULL;
-	else
-		return plane_state->uapi.visible;
-}
-
-static bool intel_crtc_active(struct intel_crtc *crtc)
-{
-	/* Be paranoid as we can arrive here with only partial
-	 * state retrieved from the hardware during setup.
-	 *
-	 * We can ditch the adjusted_mode.crtc_clock check as soon
-	 * as Haswell has gained clock readout/fastboot support.
-	 *
-	 * We can ditch the crtc->primary->state->fb check as soon as we can
-	 * properly reconstruct framebuffers.
-	 *
-	 * FIXME: The intel_crtc->active here should be switched to
-	 * crtc->state->active once we have proper CRTC states wired up
-	 * for atomic.
-	 */
-	return crtc && crtc->active && crtc->base.primary->state->fb &&
-		crtc->config->hw.adjusted_mode.crtc_clock;
-}
-
-static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
-{
-	struct intel_crtc *crtc, *enabled = NULL;
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		if (intel_crtc_active(crtc)) {
-			if (enabled)
-				return NULL;
-			enabled = crtc;
-		}
-	}
-
-	return enabled;
-}
-
-static void pnv_update_wm(struct drm_i915_private *dev_priv)
-{
-	struct intel_crtc *crtc;
-	const struct cxsr_latency *latency;
-	u32 reg;
-	unsigned int wm;
-
-	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
-					 dev_priv->is_ddr3,
-					 dev_priv->fsb_freq,
-					 dev_priv->mem_freq);
-	if (!latency) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "Unknown FSB/MEM found, disable CxSR\n");
-		intel_set_memory_cxsr(dev_priv, false);
-		return;
-	}
-
-	crtc = single_enabled_crtc(dev_priv);
-	if (crtc) {
-		const struct drm_framebuffer *fb =
-			crtc->base.primary->state->fb;
-		int pixel_rate = crtc->config->pixel_rate;
-		int cpp = fb->format->cpp[0];
-
-		/* Display SR */
-		wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
-					pnv_display_wm.fifo_size,
-					cpp, latency->display_sr);
-		reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
-		reg &= ~DSPFW_SR_MASK;
-		reg |= FW_WM(wm, SR);
-		intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
-		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
-
-		/* cursor SR */
-		wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
-					pnv_display_wm.fifo_size,
-					4, latency->cursor_sr);
-		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
-		reg &= ~DSPFW_CURSOR_SR_MASK;
-		reg |= FW_WM(wm, CURSOR_SR);
-		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
-
-		/* Display HPLL off SR */
-		wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
-					pnv_display_hplloff_wm.fifo_size,
-					cpp, latency->display_hpll_disable);
-		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
-		reg &= ~DSPFW_HPLL_SR_MASK;
-		reg |= FW_WM(wm, HPLL_SR);
-		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
-
-		/* cursor HPLL off SR */
-		wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
-					pnv_display_hplloff_wm.fifo_size,
-					4, latency->cursor_hpll_disable);
-		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
-		reg &= ~DSPFW_HPLL_CURSOR_MASK;
-		reg |= FW_WM(wm, HPLL_CURSOR);
-		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
-		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
-
-		intel_set_memory_cxsr(dev_priv, true);
-	} else {
-		intel_set_memory_cxsr(dev_priv, false);
-	}
-}
-
-/*
- * Documentation says:
- * "If the line size is small, the TLB fetches can get in the way of the
- *  data fetches, causing some lag in the pixel data return which is not
- *  accounted for in the above formulas. The following adjustment only
- *  needs to be applied if eight whole lines fit in the buffer at once.
- *  The WM is adjusted upwards by the difference between the FIFO size
- *  and the size of 8 whole lines. This adjustment is always performed
- *  in the actual pixel depth regardless of whether FBC is enabled or not."
- */
-static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
-{
-	int tlb_miss = fifo_size * 64 - width * cpp * 8;
-
-	return max(0, tlb_miss);
-}
-
-static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
-				const struct g4x_wm_values *wm)
-{
-	enum pipe pipe;
-
-	for_each_pipe(dev_priv, pipe)
-		trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
-
-	intel_uncore_write(&dev_priv->uncore, DSPFW1,
-		   FW_WM(wm->sr.plane, SR) |
-		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
-		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
-		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
-	intel_uncore_write(&dev_priv->uncore, DSPFW2,
-		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
-		   FW_WM(wm->sr.fbc, FBC_SR) |
-		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
-		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
-		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
-		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
-	intel_uncore_write(&dev_priv->uncore, DSPFW3,
-		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
-		   FW_WM(wm->sr.cursor, CURSOR_SR) |
-		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
-		   FW_WM(wm->hpll.plane, HPLL_SR));
-
-	intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
-}
-
-#define FW_WM_VLV(value, plane) \
-	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-
-static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
-				const struct vlv_wm_values *wm)
-{
-	enum pipe pipe;
-
-	for_each_pipe(dev_priv, pipe) {
-		trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
-
-		intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
-			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
-			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
-			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
-			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
-	}
-
-	/*
-	 * Zero the (unused) WM1 watermarks, and also clear all the
-	 * high order bits so that there are no out of bounds values
-	 * present in the registers during the reprogramming.
-	 */
-	intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
-	intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
-	intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
-	intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
-	intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
-
-	intel_uncore_write(&dev_priv->uncore, DSPFW1,
-		   FW_WM(wm->sr.plane, SR) |
-		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
-		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
-		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
-	intel_uncore_write(&dev_priv->uncore, DSPFW2,
-		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
-		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
-		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
-	intel_uncore_write(&dev_priv->uncore, DSPFW3,
-		   FW_WM(wm->sr.cursor, CURSOR_SR));
-
-	if (IS_CHERRYVIEW(dev_priv)) {
-		intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
-			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
-			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
-		intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
-			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
-			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
-		intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
-			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
-			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
-		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
-			   FW_WM(wm->sr.plane >> 9, SR_HI) |
-			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
-			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
-			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
-			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
-			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
-			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
-			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
-			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
-			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
-	} else {
-		intel_uncore_write(&dev_priv->uncore, DSPFW7,
-			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
-			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
-		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
-			   FW_WM(wm->sr.plane >> 9, SR_HI) |
-			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
-			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
-			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
-			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
-			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
-			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
-	}
-
-	intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
-}
-
-#undef FW_WM_VLV
-
-static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
-	/* all latencies in usec */
-	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
-	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
-	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
-
-	dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL;
-}
-
-static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
-{
-	/*
-	 * DSPCNTR[13] supposedly controls whether the
-	 * primary plane can use the FIFO space otherwise
-	 * reserved for the sprite plane. It's not 100% clear
-	 * what the actual FIFO size is, but it looks like we
-	 * can happily set both primary and sprite watermarks
-	 * up to 127 cachelines. So that would seem to mean
-	 * that either DSPCNTR[13] doesn't do anything, or that
-	 * the total FIFO is >= 256 cachelines in size. Either
-	 * way, we don't seem to have to worry about this
-	 * repartitioning as the maximum watermark value the
-	 * register can hold for each plane is lower than the
-	 * minimum FIFO size.
-	 */
-	switch (plane_id) {
-	case PLANE_CURSOR:
-		return 63;
-	case PLANE_PRIMARY:
-		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
-	case PLANE_SPRITE0:
-		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
-	default:
-		MISSING_CASE(plane_id);
-		return 0;
-	}
-}
-
-static int g4x_fbc_fifo_size(int level)
-{
-	switch (level) {
-	case G4X_WM_LEVEL_SR:
-		return 7;
-	case G4X_WM_LEVEL_HPLL:
-		return 15;
-	default:
-		MISSING_CASE(level);
-		return 0;
-	}
-}
-
-static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
-			  const struct intel_plane_state *plane_state,
-			  int level)
-{
-	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_display_mode *pipe_mode =
-		&crtc_state->hw.pipe_mode;
-	unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
-	unsigned int pixel_rate, htotal, cpp, width, wm;
-
-	if (latency == 0)
-		return USHRT_MAX;
-
-	if (!intel_wm_plane_visible(crtc_state, plane_state))
-		return 0;
-
-	cpp = plane_state->hw.fb->format->cpp[0];
-
-	/*
-	 * WaUse32BppForSRWM:ctg,elk
-	 *
-	 * The spec fails to list this restriction for the
-	 * HPLL watermark, which seems a little strange.
-	 * Let's use 32bpp for the HPLL watermark as well.
-	 */
-	if (plane->id == PLANE_PRIMARY &&
-	    level != G4X_WM_LEVEL_NORMAL)
-		cpp = max(cpp, 4u);
-
-	pixel_rate = crtc_state->pixel_rate;
-	htotal = pipe_mode->crtc_htotal;
-	width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
-	if (plane->id == PLANE_CURSOR) {
-		wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
-	} else if (plane->id == PLANE_PRIMARY &&
-		   level == G4X_WM_LEVEL_NORMAL) {
-		wm = intel_wm_method1(pixel_rate, cpp, latency);
-	} else {
-		unsigned int small, large;
-
-		small = intel_wm_method1(pixel_rate, cpp, latency);
-		large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
-
-		wm = min(small, large);
-	}
-
-	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
-			      width, cpp);
-
-	wm = DIV_ROUND_UP(wm, 64) + 2;
-
-	return min_t(unsigned int, wm, USHRT_MAX);
-}
-
-static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
-				 int level, enum plane_id plane_id, u16 value)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-	bool dirty = false;
-
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
-		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
-		dirty |= raw->plane[plane_id] != value;
-		raw->plane[plane_id] = value;
-	}
-
-	return dirty;
-}
-
-static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
-			       int level, u16 value)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-	bool dirty = false;
-
-	/* NORMAL level doesn't have an FBC watermark */
-	level = max(level, G4X_WM_LEVEL_SR);
-
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
-		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
-		dirty |= raw->fbc != value;
-		raw->fbc = value;
-	}
-
-	return dirty;
-}
-
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
-			      const struct intel_plane_state *plane_state,
-			      u32 pri_val);
-
-static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
-				     const struct intel_plane_state *plane_state)
-{
-	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
-	enum plane_id plane_id = plane->id;
-	bool dirty = false;
-	int level;
-
-	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
-		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
-		if (plane_id == PLANE_PRIMARY)
-			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
-		goto out;
-	}
-
-	for (level = 0; level < num_levels; level++) {
-		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-		int wm, max_wm;
-
-		wm = g4x_compute_wm(crtc_state, plane_state, level);
-		max_wm = g4x_plane_fifo_size(plane_id, level);
-
-		if (wm > max_wm)
-			break;
-
-		dirty |= raw->plane[plane_id] != wm;
-		raw->plane[plane_id] = wm;
-
-		if (plane_id != PLANE_PRIMARY ||
-		    level == G4X_WM_LEVEL_NORMAL)
-			continue;
-
-		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
-					raw->plane[plane_id]);
-		max_wm = g4x_fbc_fifo_size(level);
-
-		/*
-		 * FBC wm is not mandatory as we
-		 * can always just disable its use.
-		 */
-		if (wm > max_wm)
-			wm = USHRT_MAX;
-
-		dirty |= raw->fbc != wm;
-		raw->fbc = wm;
-	}
-
-	/* mark watermarks as invalid */
-	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
-
-	if (plane_id == PLANE_PRIMARY)
-		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
-
- out:
-	if (dirty) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
-			    plane->base.name,
-			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
-			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
-			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
-
-		if (plane_id == PLANE_PRIMARY)
-			drm_dbg_kms(&dev_priv->drm,
-				    "FBC watermarks: SR=%d, HPLL=%d\n",
-				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
-				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
-	}
-
-	return dirty;
-}
-
-static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
-				      enum plane_id plane_id, int level)
-{
-	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
-
-	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
-}
-
-static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
-				     int level)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
-	if (level > dev_priv->display.wm.max_level)
-		return false;
-
-	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
-		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
-		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
-}
-
-/* mark all levels starting from 'level' as invalid */
-static void g4x_invalidate_wms(struct intel_crtc *crtc,
-			       struct g4x_wm_state *wm_state, int level)
-{
-	if (level <= G4X_WM_LEVEL_NORMAL) {
-		enum plane_id plane_id;
-
-		for_each_plane_id_on_crtc(crtc, plane_id)
-			wm_state->wm.plane[plane_id] = USHRT_MAX;
-	}
-
-	if (level <= G4X_WM_LEVEL_SR) {
-		wm_state->cxsr = false;
-		wm_state->sr.cursor = USHRT_MAX;
-		wm_state->sr.plane = USHRT_MAX;
-		wm_state->sr.fbc = USHRT_MAX;
-	}
-
-	if (level <= G4X_WM_LEVEL_HPLL) {
-		wm_state->hpll_en = false;
-		wm_state->hpll.cursor = USHRT_MAX;
-		wm_state->hpll.plane = USHRT_MAX;
-		wm_state->hpll.fbc = USHRT_MAX;
-	}
-}
-
-static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
-			       int level)
-{
-	if (level < G4X_WM_LEVEL_SR)
-		return false;
-
-	if (level >= G4X_WM_LEVEL_SR &&
-	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
-		return false;
-
-	if (level >= G4X_WM_LEVEL_HPLL &&
-	    wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
-		return false;
-
-	return true;
-}
-
-static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
-	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
-	const struct g4x_pipe_wm *raw;
-	enum plane_id plane_id;
-	int level;
-
-	level = G4X_WM_LEVEL_NORMAL;
-	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
-		goto out;
-
-	raw = &crtc_state->wm.g4x.raw[level];
-	for_each_plane_id_on_crtc(crtc, plane_id)
-		wm_state->wm.plane[plane_id] = raw->plane[plane_id];
-
-	level = G4X_WM_LEVEL_SR;
-	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
-		goto out;
-
-	raw = &crtc_state->wm.g4x.raw[level];
-	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
-	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
-	wm_state->sr.fbc = raw->fbc;
-
-	wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
-
-	level = G4X_WM_LEVEL_HPLL;
-	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
-		goto out;
-
-	raw = &crtc_state->wm.g4x.raw[level];
-	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
-	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
-	wm_state->hpll.fbc = raw->fbc;
-
-	wm_state->hpll_en = wm_state->cxsr;
-
-	level++;
-
- out:
-	if (level == G4X_WM_LEVEL_NORMAL)
-		return -EINVAL;
-
-	/* invalidate the higher levels */
-	g4x_invalidate_wms(crtc, wm_state, level);
-
-	/*
-	 * Determine if the FBC watermark(s) can be used. IF
-	 * this isn't the case we prefer to disable the FBC
-	 * watermark(s) rather than disable the SR/HPLL
-	 * level(s) entirely. 'level-1' is the highest valid
-	 * level here.
-	 */
-	wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
-
-	return 0;
-}
-
-static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
-			       struct intel_crtc *crtc)
-{
-	struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	const struct intel_plane_state *old_plane_state;
-	const struct intel_plane_state *new_plane_state;
-	struct intel_plane *plane;
-	unsigned int dirty = 0;
-	int i;
-
-	for_each_oldnew_intel_plane_in_state(state, plane,
-					     old_plane_state,
-					     new_plane_state, i) {
-		if (new_plane_state->hw.crtc != &crtc->base &&
-		    old_plane_state->hw.crtc != &crtc->base)
-			continue;
-
-		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
-			dirty |= BIT(plane->id);
-	}
-
-	if (!dirty)
-		return 0;
-
-	return _g4x_compute_pipe_wm(crtc_state);
-}
-
-static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
-				       struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_crtc_state *new_crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	const struct intel_crtc_state *old_crtc_state =
-		intel_atomic_get_old_crtc_state(state, crtc);
-	struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
-	const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
-	const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
-	enum plane_id plane_id;
-
-	if (!new_crtc_state->hw.active ||
-	    drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
-		*intermediate = *optimal;
-
-		intermediate->cxsr = false;
-		intermediate->hpll_en = false;
-		goto out;
-	}
-
-	intermediate->cxsr = optimal->cxsr && active->cxsr &&
-		!new_crtc_state->disable_cxsr;
-	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
-		!new_crtc_state->disable_cxsr;
-	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
-
-	for_each_plane_id_on_crtc(crtc, plane_id) {
-		intermediate->wm.plane[plane_id] =
-			max(optimal->wm.plane[plane_id],
-			    active->wm.plane[plane_id]);
-
-		drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
-			    g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
-	}
-
-	intermediate->sr.plane = max(optimal->sr.plane,
-				     active->sr.plane);
-	intermediate->sr.cursor = max(optimal->sr.cursor,
-				      active->sr.cursor);
-	intermediate->sr.fbc = max(optimal->sr.fbc,
-				   active->sr.fbc);
-
-	intermediate->hpll.plane = max(optimal->hpll.plane,
-				       active->hpll.plane);
-	intermediate->hpll.cursor = max(optimal->hpll.cursor,
-					active->hpll.cursor);
-	intermediate->hpll.fbc = max(optimal->hpll.fbc,
-				     active->hpll.fbc);
-
-	drm_WARN_ON(&dev_priv->drm,
-		    (intermediate->sr.plane >
-		     g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
-		     intermediate->sr.cursor >
-		     g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
-		    intermediate->cxsr);
-	drm_WARN_ON(&dev_priv->drm,
-		    (intermediate->sr.plane >
-		     g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
-		     intermediate->sr.cursor >
-		     g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
-		    intermediate->hpll_en);
-
-	drm_WARN_ON(&dev_priv->drm,
-		    intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
-		    intermediate->fbc_en && intermediate->cxsr);
-	drm_WARN_ON(&dev_priv->drm,
-		    intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
-		    intermediate->fbc_en && intermediate->hpll_en);
-
-out:
-	/*
-	 * If our intermediate WM are identical to the final WM, then we can
-	 * omit the post-vblank programming; only update if it's different.
-	 */
-	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
-		new_crtc_state->wm.need_postvbl_update = true;
-
-	return 0;
-}
-
-static void g4x_merge_wm(struct drm_i915_private *dev_priv,
-			 struct g4x_wm_values *wm)
-{
-	struct intel_crtc *crtc;
-	int num_active_pipes = 0;
-
-	wm->cxsr = true;
-	wm->hpll_en = true;
-	wm->fbc_en = true;
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
-
-		if (!crtc->active)
-			continue;
-
-		if (!wm_state->cxsr)
-			wm->cxsr = false;
-		if (!wm_state->hpll_en)
-			wm->hpll_en = false;
-		if (!wm_state->fbc_en)
-			wm->fbc_en = false;
-
-		num_active_pipes++;
-	}
-
-	if (num_active_pipes != 1) {
-		wm->cxsr = false;
-		wm->hpll_en = false;
-		wm->fbc_en = false;
-	}
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
-		enum pipe pipe = crtc->pipe;
-
-		wm->pipe[pipe] = wm_state->wm;
-		if (crtc->active && wm->cxsr)
-			wm->sr = wm_state->sr;
-		if (crtc->active && wm->hpll_en)
-			wm->hpll = wm_state->hpll;
-	}
-}
-
-static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
-{
-	struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
-	struct g4x_wm_values new_wm = {};
-
-	g4x_merge_wm(dev_priv, &new_wm);
-
-	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
-		return;
-
-	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
-		_intel_set_memory_cxsr(dev_priv, false);
-
-	g4x_write_wm_values(dev_priv, &new_wm);
-
-	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
-		_intel_set_memory_cxsr(dev_priv, true);
-
-	*old_wm = new_wm;
-}
-
-static void g4x_initial_watermarks(struct intel_atomic_state *state,
-				   struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
-	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
-	g4x_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void g4x_optimize_watermarks(struct intel_atomic_state *state,
-				    struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-
-	if (!crtc_state->wm.need_postvbl_update)
-		return;
-
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
-	crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
-	g4x_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int vlv_wm_method2(unsigned int pixel_rate,
-				   unsigned int htotal,
-				   unsigned int width,
-				   unsigned int cpp,
-				   unsigned int latency)
-{
-	unsigned int ret;
-
-	ret = intel_wm_method2(pixel_rate, htotal,
-			       width, cpp, latency);
-	ret = DIV_ROUND_UP(ret, 64);
-
-	return ret;
-}
-
-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
-	/* all latencies in usec */
-	dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
-
-	dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2;
-
-	if (IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
-		dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
-
-		dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
-	}
-}
-
-static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
-				const struct intel_plane_state *plane_state,
-				int level)
-{
-	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_display_mode *pipe_mode =
-		&crtc_state->hw.pipe_mode;
-	unsigned int pixel_rate, htotal, cpp, width, wm;
-
-	if (dev_priv->display.wm.pri_latency[level] == 0)
-		return USHRT_MAX;
-
-	if (!intel_wm_plane_visible(crtc_state, plane_state))
-		return 0;
-
-	cpp = plane_state->hw.fb->format->cpp[0];
-	pixel_rate = crtc_state->pixel_rate;
-	htotal = pipe_mode->crtc_htotal;
-	width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
-	if (plane->id == PLANE_CURSOR) {
-		/*
-		 * FIXME the formula gives values that are
-		 * too big for the cursor FIFO, and hence we
-		 * would never be able to use cursors. For
-		 * now just hardcode the watermark.
-		 */
-		wm = 63;
-	} else {
-		wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
-				    dev_priv->display.wm.pri_latency[level] * 10);
-	}
-
-	return min_t(unsigned int, wm, USHRT_MAX);
-}
-
-static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
-{
-	return (active_planes & (BIT(PLANE_SPRITE0) |
-				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
-}
-
-static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct g4x_pipe_wm *raw =
-		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
-	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
-	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
-	int num_active_planes = hweight8(active_planes);
-	const int fifo_size = 511;
-	int fifo_extra, fifo_left = fifo_size;
-	int sprite0_fifo_extra = 0;
-	unsigned int total_rate;
-	enum plane_id plane_id;
-
-	/*
-	 * When enabling sprite0 after sprite1 has already been enabled
-	 * we tend to get an underrun unless sprite0 already has some
-	 * FIFO space allocated. Hence we always allocate at least one
-	 * cacheline for sprite0 whenever sprite1 is enabled.
-	 *
-	 * All other plane enable sequences appear immune to this problem.
-	 */
-	if (vlv_need_sprite0_fifo_workaround(active_planes))
-		sprite0_fifo_extra = 1;
-
-	total_rate = raw->plane[PLANE_PRIMARY] +
-		raw->plane[PLANE_SPRITE0] +
-		raw->plane[PLANE_SPRITE1] +
-		sprite0_fifo_extra;
-
-	if (total_rate > fifo_size)
-		return -EINVAL;
-
-	if (total_rate == 0)
-		total_rate = 1;
-
-	for_each_plane_id_on_crtc(crtc, plane_id) {
-		unsigned int rate;
-
-		if ((active_planes & BIT(plane_id)) == 0) {
-			fifo_state->plane[plane_id] = 0;
-			continue;
-		}
-
-		rate = raw->plane[plane_id];
-		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
-		fifo_left -= fifo_state->plane[plane_id];
-	}
-
-	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
-	fifo_left -= sprite0_fifo_extra;
-
-	fifo_state->plane[PLANE_CURSOR] = 63;
-
-	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
-
-	/* spread the remainder evenly */
-	for_each_plane_id_on_crtc(crtc, plane_id) {
-		int plane_extra;
-
-		if (fifo_left == 0)
-			break;
-
-		if ((active_planes & BIT(plane_id)) == 0)
-			continue;
-
-		plane_extra = min(fifo_extra, fifo_left);
-		fifo_state->plane[plane_id] += plane_extra;
-		fifo_left -= plane_extra;
-	}
-
-	drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
-
-	/* give it all to the first plane if none are active */
-	if (active_planes == 0) {
-		drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
-		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
-	}
-
-	return 0;
-}
-
-/* mark all levels starting from 'level' as invalid */
-static void vlv_invalidate_wms(struct intel_crtc *crtc,
-			       struct vlv_wm_state *wm_state, int level)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
-		enum plane_id plane_id;
-
-		for_each_plane_id_on_crtc(crtc, plane_id)
-			wm_state->wm[level].plane[plane_id] = USHRT_MAX;
-
-		wm_state->sr[level].cursor = USHRT_MAX;
-		wm_state->sr[level].plane = USHRT_MAX;
-	}
-}
-
-static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
-{
-	if (wm > fifo_size)
-		return USHRT_MAX;
-	else
-		return fifo_size - wm;
-}
-
-/*
- * Starting from 'level' set all higher
- * levels to 'value' in the "raw" watermarks.
- */
-static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
-				 int level, enum plane_id plane_id, u16 value)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-	int num_levels = intel_wm_num_levels(dev_priv);
-	bool dirty = false;
-
-	for (; level < num_levels; level++) {
-		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
-
-		dirty |= raw->plane[plane_id] != value;
-		raw->plane[plane_id] = value;
-	}
-
-	return dirty;
-}
-
-static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
-				     const struct intel_plane_state *plane_state)
-{
-	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-	enum plane_id plane_id = plane->id;
-	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
-	int level;
-	bool dirty = false;
-
-	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
-		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
-		goto out;
-	}
-
-	for (level = 0; level < num_levels; level++) {
-		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
-		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
-		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
-
-		if (wm > max_wm)
-			break;
-
-		dirty |= raw->plane[plane_id] != wm;
-		raw->plane[plane_id] = wm;
-	}
-
-	/* mark all higher levels as invalid */
-	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
-
-out:
-	if (dirty)
-		drm_dbg_kms(&dev_priv->drm,
-			    "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
-			    plane->base.name,
-			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
-			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
-			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
-
-	return dirty;
-}
-
-static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
-				      enum plane_id plane_id, int level)
-{
-	const struct g4x_pipe_wm *raw =
-		&crtc_state->wm.vlv.raw[level];
-	const struct vlv_fifo_state *fifo_state =
-		&crtc_state->wm.vlv.fifo_state;
-
-	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
-}
-
-static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
-{
-	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
-		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
-		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
-		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
-}
-
-static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
-	const struct vlv_fifo_state *fifo_state =
-		&crtc_state->wm.vlv.fifo_state;
-	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
-	int num_active_planes = hweight8(active_planes);
-	enum plane_id plane_id;
-	int level;
-
-	/* initially allow all levels */
-	wm_state->num_levels = intel_wm_num_levels(dev_priv);
-	/*
-	 * Note that enabling cxsr with no primary/sprite planes
-	 * enabled can wedge the pipe. Hence we only allow cxsr
-	 * with exactly one enabled primary/sprite plane.
-	 */
-	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
-
-	for (level = 0; level < wm_state->num_levels; level++) {
-		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
-		const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
-
-		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
-			break;
-
-		for_each_plane_id_on_crtc(crtc, plane_id) {
-			wm_state->wm[level].plane[plane_id] =
-				vlv_invert_wm_value(raw->plane[plane_id],
-						    fifo_state->plane[plane_id]);
-		}
-
-		wm_state->sr[level].plane =
-			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
-						 raw->plane[PLANE_SPRITE0],
-						 raw->plane[PLANE_SPRITE1]),
-					    sr_fifo_size);
-
-		wm_state->sr[level].cursor =
-			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
-					    63);
-	}
-
-	if (level == 0)
-		return -EINVAL;
-
-	/* limit to only levels we can actually handle */
-	wm_state->num_levels = level;
-
-	/* invalidate the higher levels */
-	vlv_invalidate_wms(crtc, wm_state, level);
-
-	return 0;
-}
-
-static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
-			       struct intel_crtc *crtc)
-{
-	struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
-	const struct intel_plane_state *old_plane_state;
-	const struct intel_plane_state *new_plane_state;
-	struct intel_plane *plane;
-	unsigned int dirty = 0;
-	int i;
-
-	for_each_oldnew_intel_plane_in_state(state, plane,
-					     old_plane_state,
-					     new_plane_state, i) {
-		if (new_plane_state->hw.crtc != &crtc->base &&
-		    old_plane_state->hw.crtc != &crtc->base)
-			continue;
-
-		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
-			dirty |= BIT(plane->id);
-	}
-
-	/*
-	 * DSPARB registers may have been reset due to the
-	 * power well being turned off. Make sure we restore
-	 * them to a consistent state even if no primary/sprite
-	 * planes are initially active. We also force a FIFO
-	 * recomputation so that we are sure to sanitize the
-	 * FIFO setting we took over from the BIOS even if there
-	 * are no active planes on the crtc.
-	 */
-	if (needs_modeset)
-		dirty = ~0;
-
-	if (!dirty)
-		return 0;
-
-	/* cursor changes don't warrant a FIFO recompute */
-	if (dirty & ~BIT(PLANE_CURSOR)) {
-		const struct intel_crtc_state *old_crtc_state =
-			intel_atomic_get_old_crtc_state(state, crtc);
-		const struct vlv_fifo_state *old_fifo_state =
-			&old_crtc_state->wm.vlv.fifo_state;
-		const struct vlv_fifo_state *new_fifo_state =
-			&crtc_state->wm.vlv.fifo_state;
-		int ret;
-
-		ret = vlv_compute_fifo(crtc_state);
-		if (ret)
-			return ret;
-
-		if (needs_modeset ||
-		    memcmp(old_fifo_state, new_fifo_state,
-			   sizeof(*new_fifo_state)) != 0)
-			crtc_state->fifo_changed = true;
-	}
-
-	return _vlv_compute_pipe_wm(crtc_state);
-}
-
-#define VLV_FIFO(plane, value) \
-	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
-
-static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
-				   struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	const struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	const struct vlv_fifo_state *fifo_state =
-		&crtc_state->wm.vlv.fifo_state;
-	int sprite0_start, sprite1_start, fifo_size;
-	u32 dsparb, dsparb2, dsparb3;
-
-	if (!crtc_state->fifo_changed)
-		return;
-
-	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
-	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
-	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
-
-	drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
-	drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
-
-	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
-
-	/*
-	 * uncore.lock serves a double purpose here. It allows us to
-	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
-	 * it protects the DSPARB registers from getting clobbered by
-	 * parallel updates from multiple pipes.
-	 *
-	 * intel_pipe_update_start() has already disabled interrupts
-	 * for us, so a plain spin_lock() is sufficient here.
-	 */
-	spin_lock(&uncore->lock);
-
-	switch (crtc->pipe) {
-	case PIPE_A:
-		dsparb = intel_uncore_read_fw(uncore, DSPARB);
-		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
-		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
-			    VLV_FIFO(SPRITEB, 0xff));
-		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
-			   VLV_FIFO(SPRITEB, sprite1_start));
-
-		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
-			     VLV_FIFO(SPRITEB_HI, 0x1));
-		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
-			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
-
-		intel_uncore_write_fw(uncore, DSPARB, dsparb);
-		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
-		break;
-	case PIPE_B:
-		dsparb = intel_uncore_read_fw(uncore, DSPARB);
-		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
-		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
-			    VLV_FIFO(SPRITED, 0xff));
-		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
-			   VLV_FIFO(SPRITED, sprite1_start));
-
-		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
-			     VLV_FIFO(SPRITED_HI, 0xff));
-		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
-			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
-
-		intel_uncore_write_fw(uncore, DSPARB, dsparb);
-		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
-		break;
-	case PIPE_C:
-		dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
-		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
-
-		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
-			     VLV_FIFO(SPRITEF, 0xff));
-		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
-			    VLV_FIFO(SPRITEF, sprite1_start));
-
-		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
-			     VLV_FIFO(SPRITEF_HI, 0xff));
-		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
-			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
-
-		intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
-		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
-		break;
-	default:
-		break;
-	}
-
-	intel_uncore_posting_read_fw(uncore, DSPARB);
-
-	spin_unlock(&uncore->lock);
-}
-
-#undef VLV_FIFO
-
-static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
-				       struct intel_crtc *crtc)
-{
-	struct intel_crtc_state *new_crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	const struct intel_crtc_state *old_crtc_state =
-		intel_atomic_get_old_crtc_state(state, crtc);
-	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
-	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
-	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
-	int level;
-
-	if (!new_crtc_state->hw.active ||
-	    drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
-		*intermediate = *optimal;
-
-		intermediate->cxsr = false;
-		goto out;
-	}
-
-	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
-	intermediate->cxsr = optimal->cxsr && active->cxsr &&
-		!new_crtc_state->disable_cxsr;
-
-	for (level = 0; level < intermediate->num_levels; level++) {
-		enum plane_id plane_id;
-
-		for_each_plane_id_on_crtc(crtc, plane_id) {
-			intermediate->wm[level].plane[plane_id] =
-				min(optimal->wm[level].plane[plane_id],
-				    active->wm[level].plane[plane_id]);
-		}
-
-		intermediate->sr[level].plane = min(optimal->sr[level].plane,
-						    active->sr[level].plane);
-		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
-						     active->sr[level].cursor);
-	}
-
-	vlv_invalidate_wms(crtc, intermediate, level);
-
-out:
-	/*
-	 * If our intermediate WM are identical to the final WM, then we can
-	 * omit the post-vblank programming; only update if it's different.
-	 */
-	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
-		new_crtc_state->wm.need_postvbl_update = true;
-
-	return 0;
-}
-
-static void vlv_merge_wm(struct drm_i915_private *dev_priv,
-			 struct vlv_wm_values *wm)
-{
-	struct intel_crtc *crtc;
-	int num_active_pipes = 0;
-
-	wm->level = dev_priv->display.wm.max_level;
-	wm->cxsr = true;
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
-
-		if (!crtc->active)
-			continue;
-
-		if (!wm_state->cxsr)
-			wm->cxsr = false;
-
-		num_active_pipes++;
-		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
-	}
-
-	if (num_active_pipes != 1)
-		wm->cxsr = false;
-
-	if (num_active_pipes > 1)
-		wm->level = VLV_WM_LEVEL_PM2;
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
-		enum pipe pipe = crtc->pipe;
-
-		wm->pipe[pipe] = wm_state->wm[wm->level];
-		if (crtc->active && wm->cxsr)
-			wm->sr = wm_state->sr[wm->level];
-
-		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
-		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
-		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
-		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
-	}
-}
-
-static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
-{
-	struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
-	struct vlv_wm_values new_wm = {};
-
-	vlv_merge_wm(dev_priv, &new_wm);
-
-	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
-		return;
-
-	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
-		chv_set_memory_dvfs(dev_priv, false);
-
-	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
-		chv_set_memory_pm5(dev_priv, false);
-
-	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
-		_intel_set_memory_cxsr(dev_priv, false);
-
-	vlv_write_wm_values(dev_priv, &new_wm);
-
-	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
-		_intel_set_memory_cxsr(dev_priv, true);
-
-	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
-		chv_set_memory_pm5(dev_priv, true);
-
-	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
-		chv_set_memory_dvfs(dev_priv, true);
-
-	*old_wm = new_wm;
-}
-
-static void vlv_initial_watermarks(struct intel_atomic_state *state,
-				   struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
-	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
-	vlv_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void vlv_optimize_watermarks(struct intel_atomic_state *state,
-				    struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-
-	if (!crtc_state->wm.need_postvbl_update)
-		return;
-
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
-	crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
-	vlv_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void i965_update_wm(struct drm_i915_private *dev_priv)
-{
-	struct intel_crtc *crtc;
-	int srwm = 1;
-	int cursor_sr = 16;
-	bool cxsr_enabled;
-
-	/* Calc sr entries for one plane configs */
-	crtc = single_enabled_crtc(dev_priv);
-	if (crtc) {
-		/* self-refresh has much higher latency */
-		static const int sr_latency_ns = 12000;
-		const struct drm_display_mode *pipe_mode =
-			&crtc->config->hw.pipe_mode;
-		const struct drm_framebuffer *fb =
-			crtc->base.primary->state->fb;
-		int pixel_rate = crtc->config->pixel_rate;
-		int htotal = pipe_mode->crtc_htotal;
-		int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
-		int cpp = fb->format->cpp[0];
-		int entries;
-
-		entries = intel_wm_method2(pixel_rate, htotal,
-					   width, cpp, sr_latency_ns / 100);
-		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
-		srwm = I965_FIFO_SIZE - entries;
-		if (srwm < 0)
-			srwm = 1;
-		srwm &= 0x1ff;
-		drm_dbg_kms(&dev_priv->drm,
-			    "self-refresh entries: %d, wm: %d\n",
-			    entries, srwm);
-
-		entries = intel_wm_method2(pixel_rate, htotal,
-					   crtc->base.cursor->state->crtc_w, 4,
-					   sr_latency_ns / 100);
-		entries = DIV_ROUND_UP(entries,
-				       i965_cursor_wm_info.cacheline_size) +
-			i965_cursor_wm_info.guard_size;
-
-		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
-		if (cursor_sr > i965_cursor_wm_info.max_wm)
-			cursor_sr = i965_cursor_wm_info.max_wm;
-
-		drm_dbg_kms(&dev_priv->drm,
-			    "self-refresh watermark: display plane %d "
-			    "cursor %d\n", srwm, cursor_sr);
-
-		cxsr_enabled = true;
-	} else {
-		cxsr_enabled = false;
-		/* Turn off self refresh if both pipes are enabled */
-		intel_set_memory_cxsr(dev_priv, false);
-	}
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
-		    srwm);
-
-	/* 965 has limitations... */
-	intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
-		   FW_WM(8, CURSORB) |
-		   FW_WM(8, PLANEB) |
-		   FW_WM(8, PLANEA));
-	intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
-		   FW_WM(8, PLANEC_OLD));
-	/* update cursor SR watermark */
-	intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
-
-	if (cxsr_enabled)
-		intel_set_memory_cxsr(dev_priv, true);
-}
-
-#undef FW_WM
-
-static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
-					       enum i9xx_plane_id i9xx_plane)
-{
-	struct intel_plane *plane;
-
-	for_each_intel_plane(&i915->drm, plane) {
-		if (plane->id == PLANE_PRIMARY &&
-		    plane->i9xx_plane == i9xx_plane)
-			return intel_crtc_for_pipe(i915, plane->pipe);
-	}
-
-	return NULL;
-}
-
-static void i9xx_update_wm(struct drm_i915_private *dev_priv)
-{
-	const struct intel_watermark_params *wm_info;
-	u32 fwater_lo;
-	u32 fwater_hi;
-	int cwm, srwm = 1;
-	int fifo_size;
-	int planea_wm, planeb_wm;
-	struct intel_crtc *crtc;
-
-	if (IS_I945GM(dev_priv))
-		wm_info = &i945_wm_info;
-	else if (DISPLAY_VER(dev_priv) != 2)
-		wm_info = &i915_wm_info;
-	else
-		wm_info = &i830_a_wm_info;
-
-	if (DISPLAY_VER(dev_priv) == 2)
-		fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
-	else
-		fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
-	crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
-	if (intel_crtc_active(crtc)) {
-		const struct drm_framebuffer *fb =
-			crtc->base.primary->state->fb;
-		int cpp;
-
-		if (DISPLAY_VER(dev_priv) == 2)
-			cpp = 4;
-		else
-			cpp = fb->format->cpp[0];
-
-		planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
-					       wm_info, fifo_size, cpp,
-					       pessimal_latency_ns);
-	} else {
-		planea_wm = fifo_size - wm_info->guard_size;
-		if (planea_wm > (long)wm_info->max_wm)
-			planea_wm = wm_info->max_wm;
-	}
-
-	if (DISPLAY_VER(dev_priv) == 2)
-		wm_info = &i830_bc_wm_info;
-
-	if (DISPLAY_VER(dev_priv) == 2)
-		fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
-	else
-		fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
-	crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
-	if (intel_crtc_active(crtc)) {
-		const struct drm_framebuffer *fb =
-			crtc->base.primary->state->fb;
-		int cpp;
-
-		if (DISPLAY_VER(dev_priv) == 2)
-			cpp = 4;
-		else
-			cpp = fb->format->cpp[0];
-
-		planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
-					       wm_info, fifo_size, cpp,
-					       pessimal_latency_ns);
-	} else {
-		planeb_wm = fifo_size - wm_info->guard_size;
-		if (planeb_wm > (long)wm_info->max_wm)
-			planeb_wm = wm_info->max_wm;
-	}
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
-
-	crtc = single_enabled_crtc(dev_priv);
-	if (IS_I915GM(dev_priv) && crtc) {
-		struct drm_i915_gem_object *obj;
-
-		obj = intel_fb_obj(crtc->base.primary->state->fb);
-
-		/* self-refresh seems busted with untiled */
-		if (!i915_gem_object_is_tiled(obj))
-			crtc = NULL;
-	}
-
-	/*
-	 * Overlay gets an aggressive default since video jitter is bad.
-	 */
-	cwm = 2;
-
-	/* Play safe and disable self-refresh before adjusting watermarks. */
-	intel_set_memory_cxsr(dev_priv, false);
-
-	/* Calc sr entries for one plane configs */
-	if (HAS_FW_BLC(dev_priv) && crtc) {
-		/* self-refresh has much higher latency */
-		static const int sr_latency_ns = 6000;
-		const struct drm_display_mode *pipe_mode =
-			&crtc->config->hw.pipe_mode;
-		const struct drm_framebuffer *fb =
-			crtc->base.primary->state->fb;
-		int pixel_rate = crtc->config->pixel_rate;
-		int htotal = pipe_mode->crtc_htotal;
-		int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
-		int cpp;
-		int entries;
-
-		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
-			cpp = 4;
-		else
-			cpp = fb->format->cpp[0];
-
-		entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
-					   sr_latency_ns / 100);
-		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
-		drm_dbg_kms(&dev_priv->drm,
-			    "self-refresh entries: %d\n", entries);
-		srwm = wm_info->fifo_size - entries;
-		if (srwm < 0)
-			srwm = 1;
-
-		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
-			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
-				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
-		else
-			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
-	}
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
-		     planea_wm, planeb_wm, cwm, srwm);
-
-	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
-	fwater_hi = (cwm & 0x1f);
-
-	/* Set request length to 8 cachelines per fetch */
-	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
-	fwater_hi = fwater_hi | (1 << 8);
-
-	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
-	intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
-
-	if (crtc)
-		intel_set_memory_cxsr(dev_priv, true);
-}
-
-static void i845_update_wm(struct drm_i915_private *dev_priv)
-{
-	struct intel_crtc *crtc;
-	u32 fwater_lo;
-	int planea_wm;
-
-	crtc = single_enabled_crtc(dev_priv);
-	if (crtc == NULL)
-		return;
-
-	planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
-				       &i845_wm_info,
-				       i845_get_fifo_size(dev_priv, PLANE_A),
-				       4, pessimal_latency_ns);
-	fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
-	fwater_lo |= (3<<8) | planea_wm;
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "Setting FIFO watermarks - A: %d\n", planea_wm);
-
-	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int ilk_wm_method1(unsigned int pixel_rate,
-				   unsigned int cpp,
-				   unsigned int latency)
-{
-	unsigned int ret;
-
-	ret = intel_wm_method1(pixel_rate, cpp, latency);
-	ret = DIV_ROUND_UP(ret, 64) + 2;
-
-	return ret;
-}
-
-/* latency must be in 0.1us units. */
-static unsigned int ilk_wm_method2(unsigned int pixel_rate,
-				   unsigned int htotal,
-				   unsigned int width,
-				   unsigned int cpp,
-				   unsigned int latency)
-{
-	unsigned int ret;
-
-	ret = intel_wm_method2(pixel_rate, htotal,
-			       width, cpp, latency);
-	ret = DIV_ROUND_UP(ret, 64) + 2;
-
-	return ret;
-}
-
-static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
-{
-	/*
-	 * Neither of these should be possible since this function shouldn't be
-	 * called if the CRTC is off or the plane is invisible.  But let's be
-	 * extra paranoid to avoid a potential divide-by-zero if we screw up
-	 * elsewhere in the driver.
-	 */
-	if (WARN_ON(!cpp))
-		return 0;
-	if (WARN_ON(!horiz_pixels))
-		return 0;
-
-	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
-}
-
-struct ilk_wm_maximums {
-	u16 pri;
-	u16 spr;
-	u16 cur;
-	u16 fbc;
-};
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
-			      const struct intel_plane_state *plane_state,
-			      u32 mem_value, bool is_lp)
-{
-	u32 method1, method2;
-	int cpp;
-
-	if (mem_value == 0)
-		return U32_MAX;
-
-	if (!intel_wm_plane_visible(crtc_state, plane_state))
-		return 0;
-
-	cpp = plane_state->hw.fb->format->cpp[0];
-
-	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
-
-	if (!is_lp)
-		return method1;
-
-	method2 = ilk_wm_method2(crtc_state->pixel_rate,
-				 crtc_state->hw.pipe_mode.crtc_htotal,
-				 drm_rect_width(&plane_state->uapi.src) >> 16,
-				 cpp, mem_value);
-
-	return min(method1, method2);
-}
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
-			      const struct intel_plane_state *plane_state,
-			      u32 mem_value)
-{
-	u32 method1, method2;
-	int cpp;
-
-	if (mem_value == 0)
-		return U32_MAX;
-
-	if (!intel_wm_plane_visible(crtc_state, plane_state))
-		return 0;
-
-	cpp = plane_state->hw.fb->format->cpp[0];
-
-	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
-	method2 = ilk_wm_method2(crtc_state->pixel_rate,
-				 crtc_state->hw.pipe_mode.crtc_htotal,
-				 drm_rect_width(&plane_state->uapi.src) >> 16,
-				 cpp, mem_value);
-	return min(method1, method2);
-}
-
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
-static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
-			      const struct intel_plane_state *plane_state,
-			      u32 mem_value)
-{
-	int cpp;
-
-	if (mem_value == 0)
-		return U32_MAX;
-
-	if (!intel_wm_plane_visible(crtc_state, plane_state))
-		return 0;
-
-	cpp = plane_state->hw.fb->format->cpp[0];
-
-	return ilk_wm_method2(crtc_state->pixel_rate,
-			      crtc_state->hw.pipe_mode.crtc_htotal,
-			      drm_rect_width(&plane_state->uapi.src) >> 16,
-			      cpp, mem_value);
-}
-
-/* Only for WM_LP. */
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
-			      const struct intel_plane_state *plane_state,
-			      u32 pri_val)
-{
-	int cpp;
-
-	if (!intel_wm_plane_visible(crtc_state, plane_state))
-		return 0;
-
-	cpp = plane_state->hw.fb->format->cpp[0];
-
-	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
-			  cpp);
-}
-
-static unsigned int
-ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
-{
-	if (DISPLAY_VER(dev_priv) >= 8)
-		return 3072;
-	else if (DISPLAY_VER(dev_priv) >= 7)
-		return 768;
-	else
-		return 512;
-}
-
-static unsigned int
-ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
-		     int level, bool is_sprite)
-{
-	if (DISPLAY_VER(dev_priv) >= 8)
-		/* BDW primary/sprite plane watermarks */
-		return level == 0 ? 255 : 2047;
-	else if (DISPLAY_VER(dev_priv) >= 7)
-		/* IVB/HSW primary/sprite plane watermarks */
-		return level == 0 ? 127 : 1023;
-	else if (!is_sprite)
-		/* ILK/SNB primary plane watermarks */
-		return level == 0 ? 127 : 511;
-	else
-		/* ILK/SNB sprite plane watermarks */
-		return level == 0 ? 63 : 255;
-}
-
-static unsigned int
-ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
-{
-	if (DISPLAY_VER(dev_priv) >= 7)
-		return level == 0 ? 63 : 255;
-	else
-		return level == 0 ? 31 : 63;
-}
-
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
-{
-	if (DISPLAY_VER(dev_priv) >= 8)
-		return 31;
-	else
-		return 15;
-}
-
-/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
-				     int level,
-				     const struct intel_wm_config *config,
-				     enum intel_ddb_partitioning ddb_partitioning,
-				     bool is_sprite)
-{
-	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
-
-	/* if sprites aren't enabled, sprites get nothing */
-	if (is_sprite && !config->sprites_enabled)
-		return 0;
-
-	/* HSW allows LP1+ watermarks even with multiple pipes */
-	if (level == 0 || config->num_pipes_active > 1) {
-		fifo_size /= INTEL_NUM_PIPES(dev_priv);
-
-		/*
-		 * For some reason the non self refresh
-		 * FIFO size is only half of the self
-		 * refresh FIFO size on ILK/SNB.
-		 */
-		if (DISPLAY_VER(dev_priv) <= 6)
-			fifo_size /= 2;
-	}
-
-	if (config->sprites_enabled) {
-		/* level 0 is always calculated with 1:1 split */
-		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
-			if (is_sprite)
-				fifo_size *= 5;
-			fifo_size /= 6;
-		} else {
-			fifo_size /= 2;
-		}
-	}
-
-	/* clamp to max that the registers can hold */
-	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
-}
-
-/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
-				      int level,
-				      const struct intel_wm_config *config)
-{
-	/* HSW LP1+ watermarks w/ multiple pipes */
-	if (level > 0 && config->num_pipes_active > 1)
-		return 64;
-
-	/* otherwise just report max that registers can hold */
-	return ilk_cursor_wm_reg_max(dev_priv, level);
-}
-
-static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
-				    int level,
-				    const struct intel_wm_config *config,
-				    enum intel_ddb_partitioning ddb_partitioning,
-				    struct ilk_wm_maximums *max)
-{
-	max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
-	max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
-	max->cur = ilk_cursor_wm_max(dev_priv, level, config);
-	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
-}
-
-static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
-					int level,
-					struct ilk_wm_maximums *max)
-{
-	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
-	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
-	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
-	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
-}
-
-static bool ilk_validate_wm_level(int level,
-				  const struct ilk_wm_maximums *max,
-				  struct intel_wm_level *result)
-{
-	bool ret;
-
-	/* already determined to be invalid? */
-	if (!result->enable)
-		return false;
-
-	result->enable = result->pri_val <= max->pri &&
-			 result->spr_val <= max->spr &&
-			 result->cur_val <= max->cur;
-
-	ret = result->enable;
-
-	/*
-	 * HACK until we can pre-compute everything,
-	 * and thus fail gracefully if LP0 watermarks
-	 * are exceeded...
-	 */
-	if (level == 0 && !result->enable) {
-		if (result->pri_val > max->pri)
-			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
-				      level, result->pri_val, max->pri);
-		if (result->spr_val > max->spr)
-			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
-				      level, result->spr_val, max->spr);
-		if (result->cur_val > max->cur)
-			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
-				      level, result->cur_val, max->cur);
-
-		result->pri_val = min_t(u32, result->pri_val, max->pri);
-		result->spr_val = min_t(u32, result->spr_val, max->spr);
-		result->cur_val = min_t(u32, result->cur_val, max->cur);
-		result->enable = true;
-	}
-
-	return ret;
-}
-
-static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
-				 const struct intel_crtc *crtc,
-				 int level,
-				 struct intel_crtc_state *crtc_state,
-				 const struct intel_plane_state *pristate,
-				 const struct intel_plane_state *sprstate,
-				 const struct intel_plane_state *curstate,
-				 struct intel_wm_level *result)
-{
-	u16 pri_latency = dev_priv->display.wm.pri_latency[level];
-	u16 spr_latency = dev_priv->display.wm.spr_latency[level];
-	u16 cur_latency = dev_priv->display.wm.cur_latency[level];
-
-	/* WM1+ latency values stored in 0.5us units */
-	if (level > 0) {
-		pri_latency *= 5;
-		spr_latency *= 5;
-		cur_latency *= 5;
-	}
-
-	if (pristate) {
-		result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
-						     pri_latency, level);
-		result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
-	}
-
-	if (sprstate)
-		result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
-
-	if (curstate)
-		result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
-
-	result->enable = true;
-}
-
-static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
-	u64 sskpd;
-
-	sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
-
-	wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
-	if (wm[0] == 0)
-		wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
-	wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
-	wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
-	wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
-	wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
-}
-
-static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
-	u32 sskpd;
-
-	sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
-
-	wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
-	wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
-	wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
-	wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
-}
-
-static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
-{
-	u32 mltr;
-
-	mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
-
-	/* ILK primary LP0 latency is 700 ns */
-	wm[0] = 7;
-	wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
-	wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
-}
-
-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
-				       u16 wm[5])
-{
-	/* ILK sprite LP0 latency is 1300 ns */
-	if (DISPLAY_VER(dev_priv) == 5)
-		wm[0] = 13;
-}
-
-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
-				       u16 wm[5])
-{
-	/* ILK cursor LP0 latency is 1300 ns */
-	if (DISPLAY_VER(dev_priv) == 5)
-		wm[0] = 13;
-}
-
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
-{
-	/* how many WM levels are we expecting */
-	if (HAS_HW_SAGV_WM(dev_priv))
-		return 5;
-	else if (DISPLAY_VER(dev_priv) >= 9)
-		return 7;
-	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		return 4;
-	else if (DISPLAY_VER(dev_priv) >= 6)
-		return 3;
-	else
-		return 2;
-}
-
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
-			    const char *name, const u16 wm[])
-{
-	int level, max_level = ilk_wm_max_level(dev_priv);
-
-	for (level = 0; level <= max_level; level++) {
-		unsigned int latency = wm[level];
-
-		if (latency == 0) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "%s WM%d latency not provided\n",
-				    name, level);
-			continue;
-		}
-
-		/*
-		 * - latencies are in us on gen9.
-		 * - before then, WM1+ latency values are in 0.5us units
-		 */
-		if (DISPLAY_VER(dev_priv) >= 9)
-			latency *= 10;
-		else if (level > 0)
-			latency *= 5;
-
-		drm_dbg_kms(&dev_priv->drm,
-			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
-			    wm[level], latency / 10, latency % 10);
-	}
-}
-
-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
-				    u16 wm[5], u16 min)
-{
-	int level, max_level = ilk_wm_max_level(dev_priv);
-
-	if (wm[0] >= min)
-		return false;
-
-	wm[0] = max(wm[0], min);
-	for (level = 1; level <= max_level; level++)
-		wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
-
-	return true;
-}
-
-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
-{
-	bool changed;
-
-	/*
-	 * The BIOS provided WM memory latency values are often
-	 * inadequate for high resolution displays. Adjust them.
-	 */
-	changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
-	changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
-	changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
-
-	if (!changed)
-		return;
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "WM latency values increased to avoid potential underruns\n");
-	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
-	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
-	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-}
-
-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
-{
-	/*
-	 * On some SNB machines (Thinkpad X220 Tablet at least)
-	 * LP3 usage can cause vblank interrupts to be lost.
-	 * The DEIIR bit will go high but it looks like the CPU
-	 * never gets interrupted.
-	 *
-	 * It's not clear whether other interrupt sources could
-	 * be affected or if this is somehow limited to vblank
-	 * interrupts only. To play it safe we disable LP3
-	 * watermarks entirely.
-	 */
-	if (dev_priv->display.wm.pri_latency[3] == 0 &&
-	    dev_priv->display.wm.spr_latency[3] == 0 &&
-	    dev_priv->display.wm.cur_latency[3] == 0)
-		return;
-
-	dev_priv->display.wm.pri_latency[3] = 0;
-	dev_priv->display.wm.spr_latency[3] = 0;
-	dev_priv->display.wm.cur_latency[3] = 0;
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "LP3 watermarks disabled due to potential for lost interrupts\n");
-	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
-	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
-	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-}
-
-static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
-	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
-		hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
-	else if (DISPLAY_VER(dev_priv) >= 6)
-		snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
-	else
-		ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
-
-	memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
-	       sizeof(dev_priv->display.wm.pri_latency));
-	memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
-	       sizeof(dev_priv->display.wm.pri_latency));
-
-	intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
-	intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
-
-	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
-	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
-	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
-
-	if (DISPLAY_VER(dev_priv) == 6) {
-		snb_wm_latency_quirk(dev_priv);
-		snb_wm_lp3_irq_quirk(dev_priv);
-	}
-}
-
-static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
-				 struct intel_pipe_wm *pipe_wm)
-{
-	/* LP0 watermark maximums depend on this pipe alone */
-	const struct intel_wm_config config = {
-		.num_pipes_active = 1,
-		.sprites_enabled = pipe_wm->sprites_enabled,
-		.sprites_scaled = pipe_wm->sprites_scaled,
-	};
-	struct ilk_wm_maximums max;
-
-	/* LP0 watermarks always use 1/2 DDB partitioning */
-	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
-
-	/* At least LP0 must be valid */
-	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
-		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
-		return false;
-	}
-
-	return true;
-}
-
-/* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
-			       struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	struct intel_pipe_wm *pipe_wm;
-	struct intel_plane *plane;
-	const struct intel_plane_state *plane_state;
-	const struct intel_plane_state *pristate = NULL;
-	const struct intel_plane_state *sprstate = NULL;
-	const struct intel_plane_state *curstate = NULL;
-	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
-	struct ilk_wm_maximums max;
-
-	pipe_wm = &crtc_state->wm.ilk.optimal;
-
-	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
-		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
-			pristate = plane_state;
-		else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
-			sprstate = plane_state;
-		else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
-			curstate = plane_state;
-	}
-
-	pipe_wm->pipe_enabled = crtc_state->hw.active;
-	pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
-	pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
-
-	usable_level = max_level;
-
-	/* ILK/SNB: LP2+ watermarks only w/o sprites */
-	if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
-		usable_level = 1;
-
-	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
-	if (pipe_wm->sprites_scaled)
-		usable_level = 0;
-
-	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
-	ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
-			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
-
-	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
-		return -EINVAL;
-
-	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
-
-	for (level = 1; level <= usable_level; level++) {
-		struct intel_wm_level *wm = &pipe_wm->wm[level];
-
-		ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
-				     pristate, sprstate, curstate, wm);
-
-		/*
-		 * Disable any watermark level that exceeds the
-		 * register maximums since such watermarks are
-		 * always invalid.
-		 */
-		if (!ilk_validate_wm_level(level, &max, wm)) {
-			memset(wm, 0, sizeof(*wm));
-			break;
-		}
-	}
-
-	return 0;
-}
-
-/*
- * Build a set of 'intermediate' watermark values that satisfy both the old
- * state and the new state.  These can be programmed to the hardware
- * immediately.
- */
-static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
-				       struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_crtc_state *new_crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	const struct intel_crtc_state *old_crtc_state =
-		intel_atomic_get_old_crtc_state(state, crtc);
-	struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
-	const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
-	int level, max_level = ilk_wm_max_level(dev_priv);
-
-	/*
-	 * Start with the final, target watermarks, then combine with the
-	 * currently active watermarks to get values that are safe both before
-	 * and after the vblank.
-	 */
-	*a = new_crtc_state->wm.ilk.optimal;
-	if (!new_crtc_state->hw.active ||
-	    drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) ||
-	    state->skip_intermediate_wm)
-		return 0;
-
-	a->pipe_enabled |= b->pipe_enabled;
-	a->sprites_enabled |= b->sprites_enabled;
-	a->sprites_scaled |= b->sprites_scaled;
-
-	for (level = 0; level <= max_level; level++) {
-		struct intel_wm_level *a_wm = &a->wm[level];
-		const struct intel_wm_level *b_wm = &b->wm[level];
-
-		a_wm->enable &= b_wm->enable;
-		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
-		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
-		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
-		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
-	}
-
-	/*
-	 * We need to make sure that these merged watermark values are
-	 * actually a valid configuration themselves.  If they're not,
-	 * there's no safe way to transition from the old state to
-	 * the new state, so we need to fail the atomic transaction.
-	 */
-	if (!ilk_validate_pipe_wm(dev_priv, a))
-		return -EINVAL;
-
-	/*
-	 * If our intermediate WM are identical to the final WM, then we can
-	 * omit the post-vblank programming; only update if it's different.
-	 */
-	if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
-		new_crtc_state->wm.need_postvbl_update = true;
-
-	return 0;
-}
-
-/*
- * Merge the watermarks from all active pipes for a specific level.
- */
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
-			       int level,
-			       struct intel_wm_level *ret_wm)
-{
-	const struct intel_crtc *crtc;
-
-	ret_wm->enable = true;
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
-		const struct intel_wm_level *wm = &active->wm[level];
-
-		if (!active->pipe_enabled)
-			continue;
-
-		/*
-		 * The watermark values may have been used in the past,
-		 * so we must maintain them in the registers for some
-		 * time even if the level is now disabled.
-		 */
-		if (!wm->enable)
-			ret_wm->enable = false;
-
-		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
-		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
-		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
-		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
-	}
-}
-
-/*
- * Merge all low power watermarks for all active pipes.
- */
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
-			 const struct intel_wm_config *config,
-			 const struct ilk_wm_maximums *max,
-			 struct intel_pipe_wm *merged)
-{
-	int level, max_level = ilk_wm_max_level(dev_priv);
-	int last_enabled_level = max_level;
-
-	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
-	if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
-	    config->num_pipes_active > 1)
-		last_enabled_level = 0;
-
-	/* ILK: FBC WM must be disabled always */
-	merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
-
-	/* merge each WM1+ level */
-	for (level = 1; level <= max_level; level++) {
-		struct intel_wm_level *wm = &merged->wm[level];
-
-		ilk_merge_wm_level(dev_priv, level, wm);
-
-		if (level > last_enabled_level)
-			wm->enable = false;
-		else if (!ilk_validate_wm_level(level, max, wm))
-			/* make sure all following levels get disabled */
-			last_enabled_level = level - 1;
-
-		/*
-		 * The spec says it is preferred to disable
-		 * FBC WMs instead of disabling a WM level.
-		 */
-		if (wm->fbc_val > max->fbc) {
-			if (wm->enable)
-				merged->fbc_wm_enabled = false;
-			wm->fbc_val = 0;
-		}
-	}
-
-	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
-	if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
-	    dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
-		for (level = 2; level <= max_level; level++) {
-			struct intel_wm_level *wm = &merged->wm[level];
-
-			wm->enable = false;
-		}
-	}
-}
-
-static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
-{
-	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
-	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
-}
-
-/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
-				      int level)
-{
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		return 2 * level;
-	else
-		return dev_priv->display.wm.pri_latency[level];
-}
-
-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
-				   const struct intel_pipe_wm *merged,
-				   enum intel_ddb_partitioning partitioning,
-				   struct ilk_wm_values *results)
-{
-	struct intel_crtc *crtc;
-	int level, wm_lp;
-
-	results->enable_fbc_wm = merged->fbc_wm_enabled;
-	results->partitioning = partitioning;
-
-	/* LP1+ register values */
-	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
-		const struct intel_wm_level *r;
-
-		level = ilk_wm_lp_to_level(wm_lp, merged);
-
-		r = &merged->wm[level];
-
-		/*
-		 * Maintain the watermark values even if the level is
-		 * disabled. Doing otherwise could cause underruns.
-		 */
-		results->wm_lp[wm_lp - 1] =
-			WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
-			WM_LP_PRIMARY(r->pri_val) |
-			WM_LP_CURSOR(r->cur_val);
-
-		if (r->enable)
-			results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
-
-		if (DISPLAY_VER(dev_priv) >= 8)
-			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
-		else
-			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
-
-		results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
-
-		/*
-		 * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
-		 * level is disabled. Doing otherwise could cause underruns.
-		 */
-		if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
-			drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
-			results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
-		}
-	}
-
-	/* LP0 register values */
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		enum pipe pipe = crtc->pipe;
-		const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
-		const struct intel_wm_level *r = &pipe_wm->wm[0];
-
-		if (drm_WARN_ON(&dev_priv->drm, !r->enable))
-			continue;
-
-		results->wm_pipe[pipe] =
-			WM0_PIPE_PRIMARY(r->pri_val) |
-			WM0_PIPE_SPRITE(r->spr_val) |
-			WM0_PIPE_CURSOR(r->cur_val);
-	}
-}
-
-/* Find the result with the highest level enabled. Check for enable_fbc_wm in
- * case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
-		     struct intel_pipe_wm *r1,
-		     struct intel_pipe_wm *r2)
-{
-	int level, max_level = ilk_wm_max_level(dev_priv);
-	int level1 = 0, level2 = 0;
-
-	for (level = 1; level <= max_level; level++) {
-		if (r1->wm[level].enable)
-			level1 = level;
-		if (r2->wm[level].enable)
-			level2 = level;
-	}
-
-	if (level1 == level2) {
-		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
-			return r2;
-		else
-			return r1;
-	} else if (level1 > level2) {
-		return r1;
-	} else {
-		return r2;
-	}
-}
-
-/* dirty bits used to track which watermarks need changes */
-#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
-#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
-#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
-#define WM_DIRTY_FBC (1 << 24)
-#define WM_DIRTY_DDB (1 << 25)
-
-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
-					 const struct ilk_wm_values *old,
-					 const struct ilk_wm_values *new)
-{
-	unsigned int dirty = 0;
-	enum pipe pipe;
-	int wm_lp;
-
-	for_each_pipe(dev_priv, pipe) {
-		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
-			dirty |= WM_DIRTY_PIPE(pipe);
-			/* Must disable LP1+ watermarks too */
-			dirty |= WM_DIRTY_LP_ALL;
-		}
-	}
-
-	if (old->enable_fbc_wm != new->enable_fbc_wm) {
-		dirty |= WM_DIRTY_FBC;
-		/* Must disable LP1+ watermarks too */
-		dirty |= WM_DIRTY_LP_ALL;
-	}
-
-	if (old->partitioning != new->partitioning) {
-		dirty |= WM_DIRTY_DDB;
-		/* Must disable LP1+ watermarks too */
-		dirty |= WM_DIRTY_LP_ALL;
-	}
-
-	/* LP1+ watermarks already deemed dirty, no need to continue */
-	if (dirty & WM_DIRTY_LP_ALL)
-		return dirty;
-
-	/* Find the lowest numbered LP1+ watermark in need of an update... */
-	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
-		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
-		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
-			break;
-	}
-
-	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
-	for (; wm_lp <= 3; wm_lp++)
-		dirty |= WM_DIRTY_LP(wm_lp);
-
-	return dirty;
-}
-
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
-			       unsigned int dirty)
-{
-	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
-	bool changed = false;
-
-	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
-		previous->wm_lp[2] &= ~WM_LP_ENABLE;
-		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
-		changed = true;
-	}
-	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
-		previous->wm_lp[1] &= ~WM_LP_ENABLE;
-		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
-		changed = true;
-	}
-	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
-		previous->wm_lp[0] &= ~WM_LP_ENABLE;
-		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
-		changed = true;
-	}
-
-	/*
-	 * Don't touch WM_LP_SPRITE_ENABLE here.
-	 * Doing so could cause underruns.
-	 */
-
-	return changed;
-}
-
-/*
- * The spec says we shouldn't write when we don't need, because every write
- * causes WMs to be re-evaluated, expending some power.
- */
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
-				struct ilk_wm_values *results)
-{
-	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
-	unsigned int dirty;
-	u32 val;
-
-	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
-	if (!dirty)
-		return;
-
-	_ilk_disable_lp_wm(dev_priv, dirty);
-
-	if (dirty & WM_DIRTY_PIPE(PIPE_A))
-		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
-	if (dirty & WM_DIRTY_PIPE(PIPE_B))
-		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
-	if (dirty & WM_DIRTY_PIPE(PIPE_C))
-		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
-
-	if (dirty & WM_DIRTY_DDB) {
-		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-			val = intel_uncore_read(&dev_priv->uncore, WM_MISC);
-			if (results->partitioning == INTEL_DDB_PART_1_2)
-				val &= ~WM_MISC_DATA_PARTITION_5_6;
-			else
-				val |= WM_MISC_DATA_PARTITION_5_6;
-			intel_uncore_write(&dev_priv->uncore, WM_MISC, val);
-		} else {
-			val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
-			if (results->partitioning == INTEL_DDB_PART_1_2)
-				val &= ~DISP_DATA_PARTITION_5_6;
-			else
-				val |= DISP_DATA_PARTITION_5_6;
-			intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
-		}
-	}
-
-	if (dirty & WM_DIRTY_FBC) {
-		val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL);
-		if (results->enable_fbc_wm)
-			val &= ~DISP_FBC_WM_DIS;
-		else
-			val |= DISP_FBC_WM_DIS;
-		intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val);
-	}
-
-	if (dirty & WM_DIRTY_LP(1) &&
-	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
-		intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
-
-	if (DISPLAY_VER(dev_priv) >= 7) {
-		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
-			intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
-		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
-			intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
-	}
-
-	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
-		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
-	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
-		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
-	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
-		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
-
-	dev_priv->display.wm.hw = *results;
-}
-
-bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
-{
-	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
-}
-
-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
-				  struct intel_wm_config *config)
-{
-	struct intel_crtc *crtc;
-
-	/* Compute the currently _active_ config */
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
-
-		if (!wm->pipe_enabled)
-			continue;
-
-		config->sprites_enabled |= wm->sprites_enabled;
-		config->sprites_scaled |= wm->sprites_scaled;
-		config->num_pipes_active++;
-	}
-}
-
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
-{
-	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
-	struct ilk_wm_maximums max;
-	struct intel_wm_config config = {};
-	struct ilk_wm_values results = {};
-	enum intel_ddb_partitioning partitioning;
-
-	ilk_compute_wm_config(dev_priv, &config);
-
-	ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
-	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
-
-	/* 5/6 split only in single pipe config on IVB+ */
-	if (DISPLAY_VER(dev_priv) >= 7 &&
-	    config.num_pipes_active == 1 && config.sprites_enabled) {
-		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
-		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
-
-		best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
-	} else {
-		best_lp_wm = &lp_wm_1_2;
-	}
-
-	partitioning = (best_lp_wm == &lp_wm_1_2) ?
-		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
-
-	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
-
-	ilk_write_wm_values(dev_priv, &results);
-}
-
-static void ilk_initial_watermarks(struct intel_atomic_state *state,
-				   struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
-	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
-	ilk_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void ilk_optimize_watermarks(struct intel_atomic_state *state,
-				    struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-
-	if (!crtc_state->wm.need_postvbl_update)
-		return;
-
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
-	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
-	ilk_program_watermarks(dev_priv);
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
-	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
-	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
-	enum pipe pipe = crtc->pipe;
-
-	hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
-
-	memset(active, 0, sizeof(*active));
-
-	active->pipe_enabled = crtc->active;
-
-	if (active->pipe_enabled) {
-		u32 tmp = hw->wm_pipe[pipe];
-
-		/*
-		 * For active pipes LP0 watermark is marked as
-		 * enabled, and LP1+ watermarks as disabled since
-		 * we can't really reverse compute them in case
-		 * multiple pipes are active.
-		 */
-		active->wm[0].enable = true;
-		active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
-		active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
-		active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
-	} else {
-		int level, max_level = ilk_wm_max_level(dev_priv);
-
-		/*
-		 * For inactive pipes, all watermark levels
-		 * should be marked as enabled but zeroed,
-		 * which is what we'd compute them to.
-		 */
-		for (level = 0; level <= max_level; level++)
-			active->wm[level].enable = true;
-	}
-
-	crtc->wm.active.ilk = *active;
-}
-
-#define _FW_WM(value, plane) \
-	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
-#define _FW_WM_VLV(value, plane) \
-	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
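/*
 * For illustration, a minimal expansion sketch: since _FW_WM() and
 * _FW_WM_VLV() are token-pasting helpers, a call such as _FW_WM(tmp, SR)
 * resolves to
 *
 *	((tmp) & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT
 *
 * so each invocation below simply extracts one watermark field from the
 * raw DSPFW register value just read (the *_MASK/*_SHIFT names are the
 * register field definitions these macros assume).
 */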
-
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
-			       struct g4x_wm_values *wm)
-{
-	u32 tmp;
-
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
-	wm->sr.plane = _FW_WM(tmp, SR);
-	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
-	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
-	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
-
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
-	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
-	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
-	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
-	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
-	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
-	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
-
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
-	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
-	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
-	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
-	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
-}
-
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
-			       struct vlv_wm_values *wm)
-{
-	enum pipe pipe;
-	u32 tmp;
-
-	for_each_pipe(dev_priv, pipe) {
-		tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
-
-		wm->ddl[pipe].plane[PLANE_PRIMARY] =
-			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
-		wm->ddl[pipe].plane[PLANE_CURSOR] =
-			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
-		wm->ddl[pipe].plane[PLANE_SPRITE0] =
-			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
-		wm->ddl[pipe].plane[PLANE_SPRITE1] =
-			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
-	}
-
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
-	wm->sr.plane = _FW_WM(tmp, SR);
-	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
-	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
-	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
-
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
-	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
-	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
-	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
-
-	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
-	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
-
-	if (IS_CHERRYVIEW(dev_priv)) {
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
-		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
-		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
-
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
-		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
-		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
-
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
-		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
-		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
-
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
-		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
-		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
-		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
-		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
-		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
-		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
-		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
-		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
-		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
-		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
-	} else {
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
-		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
-		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
-
-		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
-		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
-		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
-		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
-		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
-		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
-		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
-		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
-	}
-}
-
-#undef _FW_WM
-#undef _FW_WM_VLV
-
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
-	struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
-	struct intel_crtc *crtc;
-
-	g4x_read_wm_values(dev_priv, wm);
-
-	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		struct intel_crtc_state *crtc_state =
-			to_intel_crtc_state(crtc->base.state);
-		struct g4x_wm_state *active = &crtc->wm.active.g4x;
-		struct g4x_pipe_wm *raw;
-		enum pipe pipe = crtc->pipe;
-		enum plane_id plane_id;
-		int level, max_level;
-
-		active->cxsr = wm->cxsr;
-		active->hpll_en = wm->hpll_en;
-		active->fbc_en = wm->fbc_en;
-
-		active->sr = wm->sr;
-		active->hpll = wm->hpll;
-
-		for_each_plane_id_on_crtc(crtc, plane_id) {
-			active->wm.plane[plane_id] =
-				wm->pipe[pipe].plane[plane_id];
-		}
-
-		if (wm->cxsr && wm->hpll_en)
-			max_level = G4X_WM_LEVEL_HPLL;
-		else if (wm->cxsr)
-			max_level = G4X_WM_LEVEL_SR;
-		else
-			max_level = G4X_WM_LEVEL_NORMAL;
-
-		level = G4X_WM_LEVEL_NORMAL;
-		raw = &crtc_state->wm.g4x.raw[level];
-		for_each_plane_id_on_crtc(crtc, plane_id)
-			raw->plane[plane_id] = active->wm.plane[plane_id];
-
-		level = G4X_WM_LEVEL_SR;
-		if (level > max_level)
-			goto out;
-
-		raw = &crtc_state->wm.g4x.raw[level];
-		raw->plane[PLANE_PRIMARY] = active->sr.plane;
-		raw->plane[PLANE_CURSOR] = active->sr.cursor;
-		raw->plane[PLANE_SPRITE0] = 0;
-		raw->fbc = active->sr.fbc;
-
-		level = G4X_WM_LEVEL_HPLL;
-		if (level > max_level)
-			goto out;
-
-		raw = &crtc_state->wm.g4x.raw[level];
-		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
-		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
-		raw->plane[PLANE_SPRITE0] = 0;
-		raw->fbc = active->hpll.fbc;
-
-		level++;
-	out:
-		for_each_plane_id_on_crtc(crtc, plane_id)
-			g4x_raw_plane_wm_set(crtc_state, level,
-					     plane_id, USHRT_MAX);
-		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
-
-		g4x_invalidate_wms(crtc, active, level);
-
-		crtc_state->wm.g4x.optimal = *active;
-		crtc_state->wm.g4x.intermediate = *active;
-
-		drm_dbg_kms(&dev_priv->drm,
-			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
-			    pipe_name(pipe),
-			    wm->pipe[pipe].plane[PLANE_PRIMARY],
-			    wm->pipe[pipe].plane[PLANE_CURSOR],
-			    wm->pipe[pipe].plane[PLANE_SPRITE0]);
-	}
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
-		    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
-	drm_dbg_kms(&dev_priv->drm,
-		    "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
-		    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
-	drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
-		    str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
-		    str_yes_no(wm->fbc_en));
-}
-
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
-{
-	struct intel_plane *plane;
-	struct intel_crtc *crtc;
-
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
-
-	for_each_intel_plane(&dev_priv->drm, plane) {
-		struct intel_crtc *crtc =
-			intel_crtc_for_pipe(dev_priv, plane->pipe);
-		struct intel_crtc_state *crtc_state =
-			to_intel_crtc_state(crtc->base.state);
-		struct intel_plane_state *plane_state =
-			to_intel_plane_state(plane->base.state);
-		enum plane_id plane_id = plane->id;
-		int level, num_levels = intel_wm_num_levels(dev_priv);
-
-		if (plane_state->uapi.visible)
-			continue;
-
-		for (level = 0; level < num_levels; level++) {
-			struct g4x_pipe_wm *raw =
-				&crtc_state->wm.g4x.raw[level];
-
-			raw->plane[plane_id] = 0;
-
-			if (plane_id == PLANE_PRIMARY)
-				raw->fbc = 0;
-		}
-	}
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		struct intel_crtc_state *crtc_state =
-			to_intel_crtc_state(crtc->base.state);
-		int ret;
-
-		ret = _g4x_compute_pipe_wm(crtc_state);
-		drm_WARN_ON(&dev_priv->drm, ret);
-
-		crtc_state->wm.g4x.intermediate =
-			crtc_state->wm.g4x.optimal;
-		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
-	}
-
-	g4x_program_watermarks(dev_priv);
-
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
-	struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
-	struct intel_crtc *crtc;
-	u32 val;
-
-	vlv_read_wm_values(dev_priv, wm);
-
-	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
-	wm->level = VLV_WM_LEVEL_PM2;
-
-	if (IS_CHERRYVIEW(dev_priv)) {
-		vlv_punit_get(dev_priv);
-
-		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
-		if (val & DSP_MAXFIFO_PM5_ENABLE)
-			wm->level = VLV_WM_LEVEL_PM5;
-
-		/*
-		 * If DDR DVFS is disabled in the BIOS, Punit
-		 * will never ack the request. So if that happens
-		 * assume we don't have to enable/disable DDR DVFS
-		 * dynamically. To test that just set the REQ_ACK
-		 * bit to poke the Punit, but don't change the
-		 * HIGH/LOW bits so that we don't actually change
-		 * the current state.
-		 */
-		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
-		val |= FORCE_DDR_FREQ_REQ_ACK;
-		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
-
-		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
-			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "Punit not acking DDR DVFS request, "
-				    "assuming DDR DVFS is disabled\n");
-			dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5;
-		} else {
-			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
-			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
-				wm->level = VLV_WM_LEVEL_DDR_DVFS;
-		}
-
-		vlv_punit_put(dev_priv);
-	}
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		struct intel_crtc_state *crtc_state =
-			to_intel_crtc_state(crtc->base.state);
-		struct vlv_wm_state *active = &crtc->wm.active.vlv;
-		const struct vlv_fifo_state *fifo_state =
-			&crtc_state->wm.vlv.fifo_state;
-		enum pipe pipe = crtc->pipe;
-		enum plane_id plane_id;
-		int level;
-
-		vlv_get_fifo_size(crtc_state);
-
-		active->num_levels = wm->level + 1;
-		active->cxsr = wm->cxsr;
-
-		for (level = 0; level < active->num_levels; level++) {
-			struct g4x_pipe_wm *raw =
-				&crtc_state->wm.vlv.raw[level];
-
-			active->sr[level].plane = wm->sr.plane;
-			active->sr[level].cursor = wm->sr.cursor;
-
-			for_each_plane_id_on_crtc(crtc, plane_id) {
-				active->wm[level].plane[plane_id] =
-					wm->pipe[pipe].plane[plane_id];
-
-				raw->plane[plane_id] =
-					vlv_invert_wm_value(active->wm[level].plane[plane_id],
-							    fifo_state->plane[plane_id]);
-			}
-		}
-
-		for_each_plane_id_on_crtc(crtc, plane_id)
-			vlv_raw_plane_wm_set(crtc_state, level,
-					     plane_id, USHRT_MAX);
-		vlv_invalidate_wms(crtc, active, level);
-
-		crtc_state->wm.vlv.optimal = *active;
-		crtc_state->wm.vlv.intermediate = *active;
-
-		drm_dbg_kms(&dev_priv->drm,
-			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
-			    pipe_name(pipe),
-			    wm->pipe[pipe].plane[PLANE_PRIMARY],
-			    wm->pipe[pipe].plane[PLANE_CURSOR],
-			    wm->pipe[pipe].plane[PLANE_SPRITE0],
-			    wm->pipe[pipe].plane[PLANE_SPRITE1]);
-	}
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
-		    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
-}
-
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
-{
-	struct intel_plane *plane;
-	struct intel_crtc *crtc;
-
-	mutex_lock(&dev_priv->display.wm.wm_mutex);
-
-	for_each_intel_plane(&dev_priv->drm, plane) {
-		struct intel_crtc *crtc =
-			intel_crtc_for_pipe(dev_priv, plane->pipe);
-		struct intel_crtc_state *crtc_state =
-			to_intel_crtc_state(crtc->base.state);
-		struct intel_plane_state *plane_state =
-			to_intel_plane_state(plane->base.state);
-		enum plane_id plane_id = plane->id;
-		int level, num_levels = intel_wm_num_levels(dev_priv);
-
-		if (plane_state->uapi.visible)
-			continue;
-
-		for (level = 0; level < num_levels; level++) {
-			struct g4x_pipe_wm *raw =
-				&crtc_state->wm.vlv.raw[level];
-
-			raw->plane[plane_id] = 0;
-		}
-	}
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		struct intel_crtc_state *crtc_state =
-			to_intel_crtc_state(crtc->base.state);
-		int ret;
-
-		ret = _vlv_compute_pipe_wm(crtc_state);
-		drm_WARN_ON(&dev_priv->drm, ret);
-
-		crtc_state->wm.vlv.intermediate =
-			crtc_state->wm.vlv.optimal;
-		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
-	}
-
-	vlv_program_watermarks(dev_priv);
-
-	mutex_unlock(&dev_priv->display.wm.wm_mutex);
-}
-
-/*
- * FIXME should probably kill this and improve
- * the real watermark readout/sanitation instead
- */
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
-{
-	intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM_LP_ENABLE);
-	intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM_LP_ENABLE);
-	intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM_LP_ENABLE);
-
-	/*
-	 * Don't touch WM_LP_SPRITE_ENABLE here.
-	 * Doing so could cause underruns.
-	 */
-}
-
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
-	struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
-	struct intel_crtc *crtc;
-
-	ilk_init_lp_watermarks(dev_priv);
-
-	for_each_intel_crtc(&dev_priv->drm, crtc)
-		ilk_pipe_wm_get_hw_state(crtc);
-
-	hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
-	hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
-	hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
-
-	hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
-	if (DISPLAY_VER(dev_priv) >= 7) {
-		hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
-		hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
-	}
-
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
-			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-	else if (IS_IVYBRIDGE(dev_priv))
-		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
-			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-
-	hw->enable_fbc_wm =
-		!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-}
-
-static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	/*
-	 * On Ibex Peak and Cougar Point, we need to disable clock
-	 * gating for the panel power sequencer or it will fail to
-	 * start up when no ports are active.
-	 */
-	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
-{
-	enum pipe pipe;
-
-	for_each_pipe(dev_priv, pipe) {
-		intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe),
-			   intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) |
-			   DISP_TRICKLE_FEED_DISABLE);
-
-		intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe)));
-		intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
-	}
-}
-
-static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
-
-	/*
-	 * Required for FBC
-	 * WaFbcDisableDpfcClockGating:ilk
-	 */
-	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
-		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
-		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
-
-	intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0,
-		   MARIUNIT_CLOCK_GATE_DISABLE |
-		   SVSMUNIT_CLOCK_GATE_DISABLE);
-	intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1,
-		   VFMUNIT_CLOCK_GATE_DISABLE);
-
-	/*
-	 * According to the spec the following bits should be set in
-	 * order to enable memory self-refresh
-	 * The bit 22/21 of 0x42004
-	 * The bit 5 of 0x42020
-	 * The bit 15 of 0x45000
-	 */
-	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
-		   (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
-		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
-	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
-	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
-		   (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
-		    DISP_FBC_WM_DIS));
-
-	/*
-	 * Based on the document from hardware guys the following bits
-	 * should be set unconditionally in order to enable FBC.
-	 * The bit 22 of 0x42000
-	 * The bit 22 of 0x42004
-	 * The bit 7,8,9 of 0x42020.
-	 */
-	if (IS_IRONLAKE_M(dev_priv)) {
-		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
-		intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
-			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
-			   ILK_FBCQ_DIS);
-		intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
-			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
-			   ILK_DPARB_GATE);
-	}
-
-	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
-
-	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
-		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
-		   ILK_ELPIN_409_SELECT);
-
-	g4x_disable_trickle_feed(dev_priv);
-
-	ibx_init_clock_gating(dev_priv);
-}
-
-static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	enum pipe pipe;
-	u32 val;
-
-	/*
-	 * On Ibex Peak and Cougar Point, we need to disable clock
-	 * gating for the panel power sequencer or it will fail to
-	 * start up when no ports are active.
-	 */
-	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
-		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
-		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
-	intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2, intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) |
-		   DPLS_EDP_PPS_FIX_DIS);
-	/* The below fixes the weird display corruption, a few pixels shifted
-	 * downward, on (only) LVDS of some HP laptops with IVY.
-	 */
-	for_each_pipe(dev_priv, pipe) {
-		val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
-		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
-		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
-		if (dev_priv->display.vbt.fdi_rx_polarity_inverted)
-			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
-		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
-		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
-		intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val);
-	}
-	/* WADP0ClockGatingDisable */
-	for_each_pipe(dev_priv, pipe) {
-		intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe),
-			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
-	}
-}
-
-static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
-{
-	u32 tmp;
-
-	tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
-	if (REG_FIELD_GET(SSKPD_WM0_MASK_SNB, tmp) != 12)
-		drm_dbg_kms(&dev_priv->drm,
-			    "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
-			    tmp);
-}
-
-static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
-
-	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);
-
-	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
-		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
-		   ILK_ELPIN_409_SELECT);
-
-	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
-		   intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
-		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
-		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
-
-	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
-	 * gating disable must be set.  Failure to set it results in
-	 * flickering pixels due to Z write ordering failures after
-	 * some amount of runtime in the Mesa "fire" demo, and Unigine
-	 * Sanctuary and Tropics, and apparently anything else with
-	 * alpha test or pixel discard.
-	 *
-	 * According to the spec, bit 11 (RCCUNIT) must also be set,
-	 * but we didn't debug actual testcases to find it out.
-	 *
-	 * WaDisableRCCUnitClockGating:snb
-	 * WaDisableRCPBUnitClockGating:snb
-	 */
-	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
-		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
-		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
-
-	/*
-	 * According to the spec the following bits should be
-	 * set in order to enable memory self-refresh and fbc:
-	 * The bit21 and bit22 of 0x42000
-	 * The bit21 and bit22 of 0x42004
-	 * The bit5 and bit7 of 0x42020
-	 * The bit14 of 0x70180
-	 * The bit14 of 0x71180
-	 *
-	 * WaFbcAsynchFlipDisableFbcQueue:snb
-	 */
-	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
-		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
-		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
-	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
-		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
-		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
-	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D,
-		   intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) |
-		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
-		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
-
-	g4x_disable_trickle_feed(dev_priv);
-
-	cpt_init_clock_gating(dev_priv);
-
-	gen6_check_mch_setup(dev_priv);
-}
-
-static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	/*
-	 * TODO: this bit should only be enabled when really needed, then
-	 * disabled when not needed anymore in order to save power.
-	 */
-	if (HAS_PCH_LPT_LP(dev_priv))
-		intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
-			   intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
-			   PCH_LP_PARTITION_LEVEL_DISABLE);
-
-	/* WADPOClockGatingDisable:hsw */
-	intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A),
-		   intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) |
-		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
-}
-
-static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
-{
-	if (HAS_PCH_LPT_LP(dev_priv)) {
-		u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);
-
-		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
-		intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
-	}
-}
-
-static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
-				   int general_prio_credits,
-				   int high_prio_credits)
-{
-	u32 misccpctl;
-	u32 val;
-
-	/* WaTempDisableDOPClkGating:bdw */
-	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
-	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-
-	val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1);
-	val &= ~L3_PRIO_CREDITS_MASK;
-	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
-	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
-	intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val);
-
-	/*
-	 * Wait at least 100 clocks before re-enabling clock gating.
-	 * See the definition of L3SQCREG1 in BSpec.
-	 */
-	intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1);
-	udelay(1);
-	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
-}
-
-static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	/* Wa_1409120013:icl,ehl */
-	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
-			   DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
-	/*Wa_14010594013:icl, ehl */
-	intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
-			 0, ICL_DELAY_PMRSP);
-}
-
-static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	/* Wa_1409120013 */
-	if (DISPLAY_VER(dev_priv) == 12)
-		intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
-				   DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
-	/* Wa_1409825376:tgl (pre-prod)*/
-	if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
-		intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
-			   TGL_VRH_GATING_DIS);
-
-	/* Wa_14013723622:tgl,rkl,dg1,adl-s */
-	if (DISPLAY_VER(dev_priv) == 12)
-		intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY,
-				 CLKREQ_POLICY_MEM_UP_OVRD, 0);
-}
-
-static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	gen12lp_init_clock_gating(dev_priv);
-
-	/* Wa_22011091694:adlp */
-	intel_de_rmw(dev_priv, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
-
-	/* Bspec/49189 Initialize Sequence */
-	intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
-}
-
-static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	gen12lp_init_clock_gating(dev_priv);
-
-	/* Wa_1409836686:dg1[a0] */
-	if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
-		intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
-			   DPT_GATING_DIS);
-}
-
-static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	/* Wa_22010146351:xehpsdv */
-	if (IS_XEHPSDV_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
-		intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
-}
-
-static void dg2_init_clock_gating(struct drm_i915_private *i915)
-{
-	/* Wa_22010954014:dg2 */
-	intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
-			 SGSI_SIDECLK_DIS);
-
-	/*
-	 * Wa_14010733611:dg2_g10
-	 * Wa_22010146351:dg2_g10
-	 */
-	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
-		intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
-				 SGR_DIS | SGGI_DIS);
-}
-
-static void pvc_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	/* Wa_14012385139:pvc */
-	if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
-		intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
-
-	/* Wa_22010954014:pvc */
-	if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
-		intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
-}
-
-static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	if (!HAS_PCH_CNP(dev_priv))
-		return;
-
-	/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
-	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
-		   CNP_PWM_CGE_GATING_DISABLE);
-}
-
-static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	cnp_init_clock_gating(dev_priv);
-	gen9_init_clock_gating(dev_priv);
-
-	/* WAC6entrylatency:cfl */
-	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
-		   FBC_LLC_FULLY_OPEN);
-
-	/*
-	 * WaFbcTurnOffFbcWatermark:cfl
-	 * Display WA #0562: cfl
-	 */
-	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
-		   DISP_FBC_WM_DIS);
-
-	/*
-	 * WaFbcNukeOnHostModify:cfl
-	 * Display WA #0873: cfl
-	 */
-	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
-			   intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
-			   DPFC_NUKE_ON_ANY_MODIFICATION);
-}
-
-static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	gen9_init_clock_gating(dev_priv);
-
-	/* WAC6entrylatency:kbl */
-	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
-		   FBC_LLC_FULLY_OPEN);
-
-	/* WaDisableSDEUnitClockGating:kbl */
-	if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
-		intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
-			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
-	/* WaDisableGamClockGating:kbl */
-	if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
-		intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
-			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
-
-	/*
-	 * WaFbcTurnOffFbcWatermark:kbl
-	 * Display WA #0562: kbl
-	 */
-	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
-		   DISP_FBC_WM_DIS);
-
-	/*
-	 * WaFbcNukeOnHostModify:kbl
-	 * Display WA #0873: kbl
-	 */
-	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
-			   intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
-			   DPFC_NUKE_ON_ANY_MODIFICATION);
-}
-
-static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	gen9_init_clock_gating(dev_priv);
-
-	/* WaDisableDopClockGating:skl */
-	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) &
-		   ~GEN7_DOP_CLOCK_GATE_ENABLE);
-
-	/* WAC6entrylatency:skl */
-	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
-		   FBC_LLC_FULLY_OPEN);
-
-	/*
-	 * WaFbcTurnOffFbcWatermark:skl
-	 * Display WA #0562: skl
-	 */
-	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
-		   DISP_FBC_WM_DIS);
-
-	/*
-	 * WaFbcNukeOnHostModify:skl
-	 * Display WA #0873: skl
-	 */
-	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
-			   intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
-			   DPFC_NUKE_ON_ANY_MODIFICATION);
-
-	/*
-	 * WaFbcHighMemBwCorruptionAvoidance:skl
-	 * Display WA #0883: skl
-	 */
-	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
-			   intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
-			   DPFC_DISABLE_DUMMY0);
-}
-
-static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	enum pipe pipe;
-
-	/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-	intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
-		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
-		   HSW_FBCQ_DIS);
-
-	/* WaSwitchSolVfFArbitrationPriority:bdw */
-	intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
-
-	/* WaPsrDPAMaskVBlankInSRD:bdw */
-	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
-		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
-
-	for_each_pipe(dev_priv, pipe) {
-		/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
-		intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
-			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
-			   BDW_DPRS_MASK_VBLANK_SRD);
-	}
-
-	/* WaVSRefCountFullforceMissDisable:bdw */
-	/* WaDSRefCountFullforceMissDisable:bdw */
-	intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
-		   intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
-		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
-
-	intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
-		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
-
-	/* WaDisableSDEUnitClockGating:bdw */
-	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
-		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
-	/* WaProgramL3SqcReg1Default:bdw */
-	gen8_set_l3sqc_credits(dev_priv, 30, 2);
-
-	/* WaKVMNotificationOnConfigChange:bdw */
-	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1)
-		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
-
-	lpt_init_clock_gating(dev_priv);
-
-	/* WaDisableDopClockGating:bdw
-	 *
-	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
-	 * clock gating.
-	 */
-	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
-		   intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-	intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
-		   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
-		   HSW_FBCQ_DIS);
-
-	/* This is required by WaCatErrorRejectionIssue:hsw */
-	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
-		   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
-		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
-	/* WaSwitchSolVfFArbitrationPriority:hsw */
-	intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
-
-	lpt_init_clock_gating(dev_priv);
-}
-
-static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	u32 snpcr;
-
-	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
-
-	/* WaFbcAsynchFlipDisableFbcQueue:ivb */
-	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
-		   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
-		   ILK_FBCQ_DIS);
-
-	/* WaDisableBackToBackFlipFix:ivb */
-	intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
-		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
-		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
-	if (IS_IVB_GT1(dev_priv))
-		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
-			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-	else {
-		/* must write both registers */
-		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
-			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2,
-			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-	}
-
-	/*
-	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
-	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
-	 */
-	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
-		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
-	/* This is required by WaCatErrorRejectionIssue:ivb */
-	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
-			intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
-			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
-	g4x_disable_trickle_feed(dev_priv);
-
-	snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR);
-	snpcr &= ~GEN6_MBC_SNPCR_MASK;
-	snpcr |= GEN6_MBC_SNPCR_MED;
-	intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr);
-
-	if (!HAS_PCH_NOP(dev_priv))
-		cpt_init_clock_gating(dev_priv);
-
-	gen6_check_mch_setup(dev_priv);
-}
-
-static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	/* WaDisableBackToBackFlipFix:vlv */
-	intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
-		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
-		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
-	/* WaDisableDopClockGating:vlv */
-	intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
-		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
-
-	/* This is required by WaCatErrorRejectionIssue:vlv */
-	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
-		   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
-		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
-	/*
-	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
-	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
-	 */
-	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
-		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
-	/* WaDisableL3Bank2xClockGate:vlv
-	 * Disabling L3 clock gating- MMIO 940c[25] = 1
-	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
-	intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4,
-		   intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
-
-	/*
-	 * WaDisableVLVClockGating_VBIIssue:vlv
-	 * Disable clock gating on the GCFG unit to prevent a delay
-	 * in the reporting of vblank events.
-	 */
-	intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
-}
-
-static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	/* WaVSRefCountFullforceMissDisable:chv */
-	/* WaDSRefCountFullforceMissDisable:chv */
-	intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
-		   intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
-		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
-
-	/* WaDisableSemaphoreAndSyncFlipWait:chv */
-	intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
-		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
-
-	/* WaDisableCSUnitClockGating:chv */
-	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
-		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
-
-	/* WaDisableSDEUnitClockGating:chv */
-	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
-		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
-	/*
-	 * WaProgramL3SqcReg1Default:chv
-	 * See gfxspecs/Related Documents/Performance Guide/
-	 * LSQC Setting Recommendations.
-	 */
-	gen8_set_l3sqc_credits(dev_priv, 38, 2);
-}
-
-static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	u32 dspclk_gate;
-
-	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0);
-	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
-		   GS_UNIT_CLOCK_GATE_DISABLE |
-		   CL_UNIT_CLOCK_GATE_DISABLE);
-	intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0);
-	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
-		OVRUNIT_CLOCK_GATE_DISABLE |
-		OVCUNIT_CLOCK_GATE_DISABLE;
-	if (IS_GM45(dev_priv))
-		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
-	intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D(dev_priv), dspclk_gate);
-
-	g4x_disable_trickle_feed(dev_priv);
-}
-
-static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	struct intel_uncore *uncore = &dev_priv->uncore;
-
-	intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
-	intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
-	intel_uncore_write(uncore, DSPCLK_GATE_D(dev_priv), 0);
-	intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
-	intel_uncore_write16(uncore, DEUC, 0);
-	intel_uncore_write(uncore,
-			   MI_ARB_STATE,
-			   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-}
-
-static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
-		   I965_RCC_CLOCK_GATE_DISABLE |
-		   I965_RCPB_CLOCK_GATE_DISABLE |
-		   I965_ISC_CLOCK_GATE_DISABLE |
-		   I965_FBC_CLOCK_GATE_DISABLE);
-	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0);
-	intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
-		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-}
-
-static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE);
-
-	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
-		DSTATE_DOT_CLOCK_GATING;
-	intel_uncore_write(&dev_priv->uncore, D_STATE, dstate);
-
-	if (IS_PINEVIEW(dev_priv))
-		intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE),
-				   _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
-
-	/* IIR "flip pending" means done if this bit is set */
-	intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE),
-			   _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
-
-	/* interrupts should cause a wake up from C3 */
-	intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
-
-	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
-	intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
-
-	intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
-		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-}
-
-static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
-
-	/* interrupts should cause a wake up from C3 */
-	intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
-		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
-
-	intel_uncore_write(&dev_priv->uncore, MEM_MODE,
-		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
-
-	/*
-	 * Have FBC ignore 3D activity since we use software
-	 * render tracking, and otherwise a pure 3D workload
-	 * (even if it just renders a single frame and then does
-	 * absolutely nothing) would not allow FBC to recompress
-	 * until a 2D blit occurs.
-	 */
-	intel_uncore_write(&dev_priv->uncore, SCPD0,
-		   _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
-}
-
-static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	intel_uncore_write(&dev_priv->uncore, MEM_MODE,
-		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
-		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
-}
-
-void intel_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
-}
-
-void intel_suspend_hw(struct drm_i915_private *dev_priv)
-{
-	if (HAS_PCH_LPT(dev_priv))
-		lpt_suspend_hw(dev_priv);
-}
-
-static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-	drm_dbg_kms(&dev_priv->drm,
-		    "No clock gating settings or workarounds applied.\n");
-}
-
-#define CG_FUNCS(platform)						\
-static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = { \
-	.init_clock_gating = platform##_init_clock_gating,		\
-}
-
-CG_FUNCS(pvc);
-CG_FUNCS(dg2);
-CG_FUNCS(xehpsdv);
-CG_FUNCS(adlp);
-CG_FUNCS(dg1);
-CG_FUNCS(gen12lp);
-CG_FUNCS(icl);
-CG_FUNCS(cfl);
-CG_FUNCS(skl);
-CG_FUNCS(kbl);
-CG_FUNCS(bxt);
-CG_FUNCS(glk);
-CG_FUNCS(bdw);
-CG_FUNCS(chv);
-CG_FUNCS(hsw);
-CG_FUNCS(ivb);
-CG_FUNCS(vlv);
-CG_FUNCS(gen6);
-CG_FUNCS(ilk);
-CG_FUNCS(g4x);
-CG_FUNCS(i965gm);
-CG_FUNCS(i965g);
-CG_FUNCS(gen3);
-CG_FUNCS(i85x);
-CG_FUNCS(i830);
-CG_FUNCS(nop);
-#undef CG_FUNCS
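/*
 * For illustration, a minimal expansion sketch: CG_FUNCS(ilk) above
 * resolves to
 *
 *	static const struct drm_i915_clock_gating_funcs ilk_clock_gating_funcs = {
 *		.init_clock_gating = ilk_init_clock_gating,
 *	};
 *
 * giving one static vtable per platform for
 * intel_init_clock_gating_hooks() below to pick from.
 */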
-
-/**
- * intel_init_clock_gating_hooks - setup the clock gating hooks
- * @dev_priv: device private
- *
- * Setup the hooks that configure which clocks of a given platform can be
- * gated and also apply various GT and display specific workarounds for these
- * platforms. Note that some GT specific workarounds are applied separately
- * when GPU contexts or batchbuffers start their execution.
- */
-void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
-{
-	if (IS_PONTEVECCHIO(dev_priv))
-		dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs;
-	else if (IS_DG2(dev_priv))
-		dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
-	else if (IS_XEHPSDV(dev_priv))
-		dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
-	else if (IS_ALDERLAKE_P(dev_priv))
-		dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
-	else if (IS_DG1(dev_priv))
-		dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
-	else if (GRAPHICS_VER(dev_priv) == 12)
-		dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs;
-	else if (GRAPHICS_VER(dev_priv) == 11)
-		dev_priv->clock_gating_funcs = &icl_clock_gating_funcs;
-	else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
-		dev_priv->clock_gating_funcs = &cfl_clock_gating_funcs;
-	else if (IS_SKYLAKE(dev_priv))
-		dev_priv->clock_gating_funcs = &skl_clock_gating_funcs;
-	else if (IS_KABYLAKE(dev_priv))
-		dev_priv->clock_gating_funcs = &kbl_clock_gating_funcs;
-	else if (IS_BROXTON(dev_priv))
-		dev_priv->clock_gating_funcs = &bxt_clock_gating_funcs;
-	else if (IS_GEMINILAKE(dev_priv))
-		dev_priv->clock_gating_funcs = &glk_clock_gating_funcs;
-	else if (IS_BROADWELL(dev_priv))
-		dev_priv->clock_gating_funcs = &bdw_clock_gating_funcs;
-	else if (IS_CHERRYVIEW(dev_priv))
-		dev_priv->clock_gating_funcs = &chv_clock_gating_funcs;
-	else if (IS_HASWELL(dev_priv))
-		dev_priv->clock_gating_funcs = &hsw_clock_gating_funcs;
-	else if (IS_IVYBRIDGE(dev_priv))
-		dev_priv->clock_gating_funcs = &ivb_clock_gating_funcs;
-	else if (IS_VALLEYVIEW(dev_priv))
-		dev_priv->clock_gating_funcs = &vlv_clock_gating_funcs;
-	else if (GRAPHICS_VER(dev_priv) == 6)
-		dev_priv->clock_gating_funcs = &gen6_clock_gating_funcs;
-	else if (GRAPHICS_VER(dev_priv) == 5)
-		dev_priv->clock_gating_funcs = &ilk_clock_gating_funcs;
-	else if (IS_G4X(dev_priv))
-		dev_priv->clock_gating_funcs = &g4x_clock_gating_funcs;
-	else if (IS_I965GM(dev_priv))
-		dev_priv->clock_gating_funcs = &i965gm_clock_gating_funcs;
-	else if (IS_I965G(dev_priv))
-		dev_priv->clock_gating_funcs = &i965g_clock_gating_funcs;
-	else if (GRAPHICS_VER(dev_priv) == 3)
-		dev_priv->clock_gating_funcs = &gen3_clock_gating_funcs;
-	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
-		dev_priv->clock_gating_funcs = &i85x_clock_gating_funcs;
-	else if (GRAPHICS_VER(dev_priv) == 2)
-		dev_priv->clock_gating_funcs = &i830_clock_gating_funcs;
-	else {
-		MISSING_CASE(INTEL_DEVID(dev_priv));
-		dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
-	}
-}
-
-static const struct intel_wm_funcs ilk_wm_funcs = {
-	.compute_pipe_wm = ilk_compute_pipe_wm,
-	.compute_intermediate_wm = ilk_compute_intermediate_wm,
-	.initial_watermarks = ilk_initial_watermarks,
-	.optimize_watermarks = ilk_optimize_watermarks,
-};
-
-static const struct intel_wm_funcs vlv_wm_funcs = {
-	.compute_pipe_wm = vlv_compute_pipe_wm,
-	.compute_intermediate_wm = vlv_compute_intermediate_wm,
-	.initial_watermarks = vlv_initial_watermarks,
-	.optimize_watermarks = vlv_optimize_watermarks,
-	.atomic_update_watermarks = vlv_atomic_update_fifo,
-};
-
-static const struct intel_wm_funcs g4x_wm_funcs = {
-	.compute_pipe_wm = g4x_compute_pipe_wm,
-	.compute_intermediate_wm = g4x_compute_intermediate_wm,
-	.initial_watermarks = g4x_initial_watermarks,
-	.optimize_watermarks = g4x_optimize_watermarks,
-};
-
-static const struct intel_wm_funcs pnv_wm_funcs = {
-	.update_wm = pnv_update_wm,
-};
-
-static const struct intel_wm_funcs i965_wm_funcs = {
-	.update_wm = i965_update_wm,
-};
-
-static const struct intel_wm_funcs i9xx_wm_funcs = {
-	.update_wm = i9xx_update_wm,
-};
-
-static const struct intel_wm_funcs i845_wm_funcs = {
-	.update_wm = i845_update_wm,
-};
-
-static const struct intel_wm_funcs nop_funcs = {
-};
-
-/* Set up chip specific power management-related functions */
-void intel_init_pm(struct drm_i915_private *dev_priv)
-{
-	if (DISPLAY_VER(dev_priv) >= 9) {
-		skl_wm_init(dev_priv);
-		return;
-	}
-
-	/* For cxsr */
-	if (IS_PINEVIEW(dev_priv))
-		pnv_get_mem_freq(dev_priv);
-	else if (GRAPHICS_VER(dev_priv) == 5)
-		ilk_get_mem_freq(dev_priv);
-
-	/* For FIFO watermark updates */
-	if (HAS_PCH_SPLIT(dev_priv)) {
-		ilk_setup_wm_latency(dev_priv);
-
-		if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] &&
-		     dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) ||
-		    (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] &&
-		     dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) {
-			dev_priv->display.funcs.wm = &ilk_wm_funcs;
-		} else {
-			drm_dbg_kms(&dev_priv->drm,
-				    "Failed to read display plane latency. "
-				    "Disable CxSR\n");
-			dev_priv->display.funcs.wm = &nop_funcs;
-		}
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		vlv_setup_wm_latency(dev_priv);
-		dev_priv->display.funcs.wm = &vlv_wm_funcs;
-	} else if (IS_G4X(dev_priv)) {
-		g4x_setup_wm_latency(dev_priv);
-		dev_priv->display.funcs.wm = &g4x_wm_funcs;
-	} else if (IS_PINEVIEW(dev_priv)) {
-		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
-					    dev_priv->is_ddr3,
-					    dev_priv->fsb_freq,
-					    dev_priv->mem_freq)) {
-			drm_info(&dev_priv->drm,
-				 "failed to find known CxSR latency "
-				 "(found ddr%s fsb freq %d, mem freq %d), "
-				 "disabling CxSR\n",
-				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
-				 dev_priv->fsb_freq, dev_priv->mem_freq);
-			/* Disable CxSR and never update its watermark again */
-			intel_set_memory_cxsr(dev_priv, false);
-			dev_priv->display.funcs.wm = &nop_funcs;
-		} else
-			dev_priv->display.funcs.wm = &pnv_wm_funcs;
-	} else if (DISPLAY_VER(dev_priv) == 4) {
-		dev_priv->display.funcs.wm = &i965_wm_funcs;
-	} else if (DISPLAY_VER(dev_priv) == 3) {
-		dev_priv->display.funcs.wm = &i9xx_wm_funcs;
-	} else if (DISPLAY_VER(dev_priv) == 2) {
-		if (INTEL_NUM_PIPES(dev_priv) == 1)
-			dev_priv->display.funcs.wm = &i845_wm_funcs;
-		else
-			dev_priv->display.funcs.wm = &i9xx_wm_funcs;
-	} else {
-		drm_err(&dev_priv->drm,
-			"unexpected fall-through in %s\n", __func__);
-		dev_priv->display.funcs.wm = &nop_funcs;
-	}
-}
-
-void intel_pm_setup(struct drm_i915_private *dev_priv)
-{
-	dev_priv->runtime_pm.suspended = false;
-	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
-}
-- 
2.34.1



end of thread, other threads:[~2022-10-18 10:39 UTC | newest]

Thread overview: 17+ messages
2022-09-29 13:17 [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 phy support Mika Kahola
2022-09-29 13:17 ` [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus Mika Kahola
2022-09-30  9:04   ` Jani Nikula
2022-10-06 10:04     ` Kahola, Mika
2022-10-11  0:00   ` Lucas De Marchi
2022-09-29 13:17 ` [Intel-gfx] [PATCH 2/5] drm/i915/mtl: Add PLL programming support for C10 phy Mika Kahola
2022-09-30  9:19   ` Jani Nikula
2022-09-29 13:17 ` [Intel-gfx] [PATCH 3/5] drm/i915/mtl: Add support for C10 phy programming Mika Kahola
2022-09-30  9:32   ` Jani Nikula
2022-10-14 12:44     ` Kahola, Mika
2022-10-18 10:39       ` Jani Nikula
2022-09-29 13:17 ` [Intel-gfx] [PATCH 4/5] drm/i915/mtl: Add C10 phy programming for HDMI Mika Kahola
2022-09-29 13:17 ` [Intel-gfx] [PATCH 5/5] drm/i915/mtl: Add vswing programming for C10 phys Mika Kahola
2022-09-29 19:46 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915/mtl: Add C10 phy support Patchwork
2022-09-29 20:08 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2022-09-30 21:08 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
  -- strict thread matches above, loose matches on Subject: below --
2022-09-29 11:16 [Intel-gfx] [PATCH 0/5] drm/i915/mtl: Add C10 support Mika Kahola
2022-09-29 11:16 ` [Intel-gfx] [PATCH 1/5] drm/i915/mtl: Add Support for C10, C20 PHY Message Bus Mika Kahola
