* [dpdk-dev] [PATCH 0/4] net/ice: support IEEE 1588
@ 2021-08-06  1:34 Simei Su
  2021-08-06  1:34 ` [dpdk-dev] [PATCH 1/4] net/ice/base: add 1588 capability probe Simei Su
                   ` (4 more replies)
  0 siblings, 5 replies; 13+ messages in thread
From: Simei Su @ 2021-08-06  1:34 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, haiyue.wang, Simei Su

[PATCH 1/4] add 1588 capability probe.
[PATCH 2/4] add low level functions for device clock control.
[PATCH 3/4] add clock initialization function.
[PATCH 4/4] add ethdev APIs to enable 1588 timesync.
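
As a point of reference, below is a minimal sketch of how an application
could exercise the resulting timesync path through the generic ethdev API
once the series is applied. It is illustrative only and not part of the
series; error handling, port configuration and queue setup are omitted,
and the port is assumed to be already started.

#include <time.h>
#include <rte_ethdev.h>

/* Illustrative only: enable IEEE 1588 timesync on a port, read the
 * device clock and apply a small offset adjustment.
 */
static int
ptp_demo(uint16_t port_id)
{
	struct timespec ts;
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;

	ret = rte_eth_timesync_read_time(port_id, &ts);
	if (ret != 0)
		return ret;

	/* The offset would normally come from a PTP servo, e.g. the one
	 * in examples/ptpclient.
	 */
	ret = rte_eth_timesync_adjust_time(port_id, 1000 /* ns */);
	if (ret != 0)
		return ret;

	return rte_eth_timesync_disable(port_id);
}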

Qi Zhang (3):
  net/ice/base: add 1588 capability probe
  net/ice/base: add low level functions for device clock control
  net/ice/base: add clock initialization function

Simei Su (1):
  net/ice: support IEEE 1588 PTP

 drivers/net/ice/base/ice_adminq_cmd.h |    2 +
 drivers/net/ice/base/ice_cgu_regs.h   |  117 ++
 drivers/net/ice/base/ice_common.c     |  254 ++++
 drivers/net/ice/base/ice_common.h     |   11 +
 drivers/net/ice/base/ice_controlq.c   |   52 +-
 drivers/net/ice/base/ice_controlq.h   |    2 +
 drivers/net/ice/base/ice_ptp_consts.h |  160 +++
 drivers/net/ice/base/ice_ptp_hw.c     | 2369 +++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_ptp_hw.h     |  400 ++++++
 drivers/net/ice/base/ice_type.h       |   75 ++
 drivers/net/ice/base/meson.build      |    1 +
 drivers/net/ice/ice_ethdev.c          |  226 +++-
 drivers/net/ice/ice_ethdev.h          |    5 +
 drivers/net/ice/ice_rxtx.c            |   42 +-
 drivers/net/ice/ice_rxtx.h            |    1 +
 15 files changed, 3714 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/ice/base/ice_cgu_regs.h
 create mode 100644 drivers/net/ice/base/ice_ptp_consts.h
 create mode 100644 drivers/net/ice/base/ice_ptp_hw.c
 create mode 100644 drivers/net/ice/base/ice_ptp_hw.h

-- 
2.9.5



* [dpdk-dev] [PATCH 1/4] net/ice/base: add 1588 capability probe
  2021-08-06  1:34 [dpdk-dev] [PATCH 0/4] net/ice: support IEEE 1588 Simei Su
@ 2021-08-06  1:34 ` Simei Su
  2021-08-06  1:34 ` [dpdk-dev] [PATCH 2/4] net/ice/base: add low level functions for device clock control Simei Su
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 13+ messages in thread
From: Simei Su @ 2021-08-06  1:34 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, haiyue.wang, Jacob Keller

From: Qi Zhang <qi.z.zhang@intel.com>

Parse 1588 timesync capability during device capability probing.
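
As an illustrative aside (not part of the patch), once ice_init_hw() has
populated the capability structures, the fields added here can be
consumed as in the hypothetical helper below:

/* Hypothetical helper, for illustration only: report whether this PF
 * owns the source timer and which timer index it is associated with,
 * based on the capability fields parsed by this patch.
 */
static bool
ice_pf_owns_src_tmr(struct ice_hw *hw, u8 *tmr_idx)
{
	struct ice_ts_func_info *info = &hw->func_caps.ts_func_info;

	if (!hw->func_caps.common_cap.ieee_1588)
		return false;

	*tmr_idx = info->tmr_index_assoc;
	return info->src_tmr_owned;
}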

Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/ice/base/ice_adminq_cmd.h |   1 +
 drivers/net/ice/base/ice_common.c     | 111 ++++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_type.h       |  72 ++++++++++++++++++++++
 3 files changed, 184 insertions(+)

diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 3805fc9..a0af35c 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -108,6 +108,7 @@ struct ice_aqc_list_caps_elem {
 #define ICE_AQC_CAPS_TXQS				0x0042
 #define ICE_AQC_CAPS_MSIX				0x0043
 #define ICE_AQC_CAPS_FD					0x0045
+#define ICE_AQC_CAPS_1588				0x0046
 #define ICE_AQC_CAPS_MAX_MTU				0x0047
 #define ICE_AQC_CAPS_IWARP				0x0051
 #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE		0x0076
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index cf0a7d4..56a4696 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -2091,6 +2091,60 @@ ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
 }
 
 /**
+ * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_1588.
+ */
+static void
+ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+			 struct ice_aqc_list_caps_elem *cap)
+{
+	struct ice_ts_func_info *info = &func_p->ts_func_info;
+	u32 number = LE32_TO_CPU(cap->number);
+
+	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
+	func_p->common_cap.ieee_1588 = info->ena;
+
+	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
+	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
+	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
+	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
+
+	info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
+	info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+
+	if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
+		info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+	} else {
+		/* Unknown clock frequency, so assume a (probably incorrect)
+		 * default to avoid out-of-bounds lookups of
+		 * frequency-related information.
+		 */
+		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
+			  info->clk_freq);
+		info->time_ref = ICE_TIME_REF_FREQ_25_000;
+	}
+
+	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
+		  func_p->common_cap.ieee_1588);
+	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
+		  info->src_tmr_owned);
+	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
+		  info->tmr_ena);
+	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
+		  info->tmr_index_owned);
+	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
+		  info->tmr_index_assoc);
+	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
+		  info->clk_freq);
+	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
+		  info->clk_src);
+}
+
+/**
  * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
  * @hw: pointer to the HW struct
  * @func_p: pointer to function capabilities structure
@@ -2155,6 +2209,9 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
 		case ICE_AQC_CAPS_VSI:
 			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
 			break;
+		case ICE_AQC_CAPS_1588:
+			ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
+			break;
 		case ICE_AQC_CAPS_FD:
 			ice_parse_fdir_func_caps(hw, func_p);
 			break;
@@ -2209,6 +2266,57 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
 }
 
 /**
+ * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_1588 for device capabilities.
+ */
+static void
+ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+			struct ice_aqc_list_caps_elem *cap)
+{
+	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
+	u32 logical_id = LE32_TO_CPU(cap->logical_id);
+	u32 phys_id = LE32_TO_CPU(cap->phys_id);
+	u32 number = LE32_TO_CPU(cap->number);
+
+	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
+	dev_p->common_cap.ieee_1588 = info->ena;
+
+	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
+	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
+	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
+
+	info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
+	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
+	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
+
+	info->ena_ports = logical_id;
+	info->tmr_own_map = phys_id;
+
+	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
+		  dev_p->common_cap.ieee_1588);
+	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
+		  info->tmr0_owner);
+	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
+		  info->tmr0_owned);
+	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
+		  info->tmr0_ena);
+	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
+		  info->tmr1_owner);
+	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
+		  info->tmr1_owned);
+	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
+		  info->tmr1_ena);
+	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
+		  info->ena_ports);
+	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
+		  info->tmr_own_map);
+}
+
+/**
  * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
  * @hw: pointer to the HW struct
  * @dev_p: pointer to device capabilities structure
@@ -2266,6 +2374,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
 		case ICE_AQC_CAPS_VSI:
 			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
 			break;
+		case ICE_AQC_CAPS_1588:
+			ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
+			break;
 		case  ICE_AQC_CAPS_FD:
 			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
 			break;
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index ce508a0..2d21c21 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -435,6 +435,7 @@ struct ice_hw_common_caps {
 
 	u8 dcb;
 	u8 iscsi;
+	u8 ieee_1588;
 	u8 mgmt_cem;
 
 	/* WoL and APM support */
@@ -465,12 +466,82 @@ struct ice_hw_common_caps {
 #define ICE_EXT_TOPO_DEV_IMG_PROG_EN	BIT(1)
 };
 
+/* IEEE 1588 TIME_SYNC specific info */
+/* Function specific definitions */
+#define ICE_TS_FUNC_ENA_M		BIT(0)
+#define ICE_TS_SRC_TMR_OWND_M		BIT(1)
+#define ICE_TS_TMR_ENA_M		BIT(2)
+#define ICE_TS_TMR_IDX_OWND_S		4
+#define ICE_TS_TMR_IDX_OWND_M		BIT(4)
+#define ICE_TS_CLK_FREQ_S		16
+#define ICE_TS_CLK_FREQ_M		MAKEMASK(0x7, ICE_TS_CLK_FREQ_S)
+#define ICE_TS_CLK_SRC_S		20
+#define ICE_TS_CLK_SRC_M		BIT(20)
+#define ICE_TS_TMR_IDX_ASSOC_S		24
+#define ICE_TS_TMR_IDX_ASSOC_M		BIT(24)
+
+/* TIME_REF clock rate specification */
+enum ice_time_ref_freq {
+	ICE_TIME_REF_FREQ_25_000	= 0,
+	ICE_TIME_REF_FREQ_122_880	= 1,
+	ICE_TIME_REF_FREQ_125_000	= 2,
+	ICE_TIME_REF_FREQ_153_600	= 3,
+	ICE_TIME_REF_FREQ_156_250	= 4,
+	ICE_TIME_REF_FREQ_245_760	= 5,
+
+	NUM_ICE_TIME_REF_FREQ
+};
+
+/* Clock source specification */
+enum ice_clk_src {
+	ICE_CLK_SRC_TCX0	= 0, /* Temperature compensated oscillator  */
+	ICE_CLK_SRC_TIME_REF	= 1, /* Use TIME_REF reference clock */
+
+	NUM_ICE_CLK_SRC
+};
+
+struct ice_ts_func_info {
+	/* Function specific info */
+	enum ice_time_ref_freq time_ref;
+	u8 clk_freq;
+	u8 clk_src;
+	u8 tmr_index_assoc;
+	u8 ena;
+	u8 tmr_index_owned;
+	u8 src_tmr_owned;
+	u8 tmr_ena;
+};
+
+/* Device specific definitions */
+#define ICE_TS_TMR0_OWNR_M		0x7
+#define ICE_TS_TMR0_OWND_M		BIT(3)
+#define ICE_TS_TMR1_OWNR_S		4
+#define ICE_TS_TMR1_OWNR_M		MAKEMASK(0x7, ICE_TS_TMR1_OWNR_S)
+#define ICE_TS_TMR1_OWND_M		BIT(7)
+#define ICE_TS_DEV_ENA_M		BIT(24)
+#define ICE_TS_TMR0_ENA_M		BIT(25)
+#define ICE_TS_TMR1_ENA_M		BIT(26)
+
+struct ice_ts_dev_info {
+	/* Device specific info */
+	u32 ena_ports;
+	u32 tmr_own_map;
+	u32 tmr0_owner;
+	u32 tmr1_owner;
+	u8 tmr0_owned;
+	u8 tmr1_owned;
+	u8 ena;
+	u8 tmr0_ena;
+	u8 tmr1_ena;
+};
+
 /* Function specific capabilities */
 struct ice_hw_func_caps {
 	struct ice_hw_common_caps common_cap;
 	u32 guar_num_vsi;
 	u32 fd_fltr_guar;		/* Number of filters guaranteed */
 	u32 fd_fltr_best_effort;	/* Number of best effort filters */
+	struct ice_ts_func_info ts_func_info;
 };
 
 /* Device wide capabilities */
@@ -478,6 +549,7 @@ struct ice_hw_dev_caps {
 	struct ice_hw_common_caps common_cap;
 	u32 num_vsi_allocd_to_host;	/* Excluding EMP VSI */
 	u32 num_flow_director_fltr;	/* Number of FD filters available */
+	struct ice_ts_dev_info ts_dev_info;
 	u32 num_funcs;
 };
 
-- 
2.9.5



* [dpdk-dev] [PATCH 2/4] net/ice/base: add low level functions for device clock control
  2021-08-06  1:34 [dpdk-dev] [PATCH 0/4] net/ice: support IEEE 1588 Simei Su
  2021-08-06  1:34 ` [dpdk-dev] [PATCH 1/4] net/ice/base: add 1588 capability probe Simei Su
@ 2021-08-06  1:34 ` Simei Su
  2021-08-06  1:34 ` [dpdk-dev] [PATCH 3/4] net/ice/base: add clock initialization function Simei Su
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 13+ messages in thread
From: Simei Su @ 2021-08-06  1:34 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, haiyue.wang, Jacob Keller

From: Qi Zhang <qi.z.zhang@intel.com>

The ice hardware supports exposing a hardware clock for high precision
timestamping. This is primarily intended for accelerating the Precision
Time Protocol.

Add several low level functions intended to be used as the basis for
enabling the device clock, and ensuring that the port timers are
synchronized properly.
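
The new code follows a prepare-then-execute model: shadow registers for
the source timer and for every PHY port are programmed first, each port
is armed with a timer command, and a single write of SYNC_EXEC_CMD to
GLTSYN_CMD_SYNC then latches everything at once. The sketch below shows
the PHY-side portion of that flow for an INIT_TIME command; it is
illustrative only, since the e822 helpers involved are static in this
patch and the wrapper name is hypothetical.

/* Illustrative only: stage a new initial time on the E822 PHY ports
 * and trigger the synchronized update.
 */
static enum ice_status
ice_ptp_init_phy_time_sketch(struct ice_hw *hw, u32 time)
{
	enum ice_status status;

	/* 1) stage the upper 32 bits of the new time in the PHY port
	 * shadow registers
	 */
	status = ice_ptp_prep_phy_time_e822(hw, time);
	if (status)
		return status;

	/* 2) arm every port with the INIT_TIME command */
	status = ice_ptp_port_cmd_e822(hw, INIT_TIME, true);
	if (status)
		return status;

	/* 3) one GLTSYN_CMD_SYNC write copies the shadow values into
	 * the live source and PHY timers
	 */
	ice_ptp_exec_tmr_cmd(hw);

	return ICE_SUCCESS;
}

As a side note, the nominal_incval entries in the new ice_ptp_consts.h
appear to be the TIME_REF PLL period in nanoseconds expressed as a
32-bit fixed-point fraction, i.e. roughly (2^32 * 10^9) / pll_freq; for
example, the 25 MHz entry gives about 0x136E44FAB, matching the table.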

Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/ice/base/ice_adminq_cmd.h |    1 +
 drivers/net/ice/base/ice_common.c     |  143 +++
 drivers/net/ice/base/ice_common.h     |   11 +
 drivers/net/ice/base/ice_controlq.c   |   52 +-
 drivers/net/ice/base/ice_controlq.h   |    2 +
 drivers/net/ice/base/ice_ptp_consts.h |   86 ++
 drivers/net/ice/base/ice_ptp_hw.c     | 2023 +++++++++++++++++++++++++++++++++
 drivers/net/ice/base/ice_ptp_hw.h     |  376 ++++++
 drivers/net/ice/base/ice_type.h       |    3 +
 drivers/net/ice/base/meson.build      |    1 +
 10 files changed, 2697 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/base/ice_ptp_consts.h
 create mode 100644 drivers/net/ice/base/ice_ptp_hw.c
 create mode 100644 drivers/net/ice/base/ice_ptp_hw.h

diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index a0af35c..470aa89 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -3120,6 +3120,7 @@ enum ice_adminq_opc {
 	ice_aqc_opc_set_event_mask			= 0x0613,
 	ice_aqc_opc_set_mac_lb				= 0x0620,
 	ice_aqc_opc_get_link_topo			= 0x06E0,
+	ice_aqc_opc_get_link_topo_pin			= 0x06E1,
 	ice_aqc_opc_read_i2c				= 0x06E2,
 	ice_aqc_opc_write_i2c				= 0x06E3,
 	ice_aqc_opc_set_port_id_led			= 0x06E9,
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 56a4696..4748ca5 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -65,6 +65,28 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 }
 
 /**
+ * ice_is_generic_mac
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if mac_type is ICE_MAC_GENERIC, false if not
+ */
+bool ice_is_generic_mac(struct ice_hw *hw)
+{
+	return hw->mac_type == ICE_MAC_GENERIC;
+}
+
+/**
+ * ice_is_e810
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E810 based, false if not.
+ */
+bool ice_is_e810(struct ice_hw *hw)
+{
+	return hw->mac_type == ICE_MAC_E810;
+}
+
+/**
  * ice_clear_pf_cfg - Clear PF configuration
  * @hw: pointer to the hardware structure
  *
@@ -1354,6 +1376,127 @@ ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
 	return ICE_SUCCESS;
 }
 
+/* Sideband Queue command wrappers */
+
+/**
+ * ice_get_sbq - returns the right control queue to use for sideband
+ * @hw: pointer to the hardware structure
+ */
+static struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
+{
+	if (!ice_is_generic_mac(hw))
+		return &hw->adminq;
+	return &hw->sbq;
+}
+
+/**
+ * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ */
+static enum ice_status
+ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
+		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
+{
+	return ice_sq_send_cmd(hw, ice_get_sbq(hw), (struct ice_aq_desc *)desc,
+			       buf, buf_size, cd);
+}
+
+/**
+ * ice_sbq_send_cmd_nolock - send Sideband Queue command to Sideband Queue
+ *                           but do not lock sq_lock
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ */
+static enum ice_status
+ice_sbq_send_cmd_nolock(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
+			void *buf, u16 buf_size, struct ice_sq_cd *cd)
+{
+	return ice_sq_send_cmd_nolock(hw, ice_get_sbq(hw),
+				      (struct ice_aq_desc *)desc, buf,
+				      buf_size, cd);
+}
+
+/**
+ * ice_sbq_rw_reg_lp - Fill Sideband Queue command, with lock parameter
+ * @hw: pointer to the HW struct
+ * @in: message info to be filled in descriptor
+ * @lock: true to lock the sq_lock (the usual case); false if the sq_lock has
+ *        already been locked at a higher level
+ */
+enum ice_status ice_sbq_rw_reg_lp(struct ice_hw *hw,
+				  struct ice_sbq_msg_input *in, bool lock)
+{
+	struct ice_sbq_cmd_desc desc = {0};
+	struct ice_sbq_msg_req msg = {0};
+	enum ice_status status;
+	u16 msg_len;
+
+	msg_len = sizeof(msg);
+
+	msg.dest_dev = in->dest_dev;
+	msg.opcode = in->opcode;
+	msg.flags = ICE_SBQ_MSG_FLAGS;
+	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
+	msg.msg_addr_low = CPU_TO_LE16(in->msg_addr_low);
+	msg.msg_addr_high = CPU_TO_LE32(in->msg_addr_high);
+
+	if (in->opcode)
+		msg.data = CPU_TO_LE32(in->data);
+	else
+		/* data read comes back in completion, so shorten the struct by
+		 * sizeof(msg.data)
+		 */
+		msg_len -= sizeof(msg.data);
+
+	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
+	desc.opcode = CPU_TO_LE16(ice_sbq_opc_neigh_dev_req);
+	desc.param0.cmd_len = CPU_TO_LE16(msg_len);
+	if (lock)
+		status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
+	else
+		status = ice_sbq_send_cmd_nolock(hw, &desc, &msg, msg_len,
+						 NULL);
+	if (!status && !in->opcode)
+		in->data = LE32_TO_CPU
+			(((struct ice_sbq_msg_cmpl *)&msg)->data);
+	return status;
+}
+
+/**
+ * ice_sbq_rw_reg - Fill Sideband Queue command
+ * @hw: pointer to the HW struct
+ * @in: message info to be filled in descriptor
+ */
+enum ice_status ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
+{
+	return ice_sbq_rw_reg_lp(hw, in, true);
+}
+
+/**
+ * ice_sbq_lock - Lock the sideband queue's sq_lock
+ * @hw: pointer to the HW struct
+ */
+void ice_sbq_lock(struct ice_hw *hw)
+{
+	ice_acquire_lock(&ice_get_sbq(hw)->sq_lock);
+}
+
+/**
+ * ice_sbq_unlock - Unlock the sideband queue's sq_lock
+ * @hw: pointer to the HW struct
+ */
+void ice_sbq_unlock(struct ice_hw *hw)
+{
+	ice_release_lock(&ice_get_sbq(hw)->sq_lock);
+}
+
 /* FW Admin Queue command wrappers */
 
 /**
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 22ea89c..de7592b 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -51,6 +51,10 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
 		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
 		      enum ice_adminq_opc opc, struct ice_sq_cd *cd);
 enum ice_status
+ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
+		       struct ice_sq_cd *cd);
+enum ice_status
 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		struct ice_aq_desc *desc, void *buf, u16 buf_size,
 		struct ice_sq_cd *cd);
@@ -215,6 +219,11 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
 void ice_replay_post(struct ice_hw *hw);
 struct ice_q_ctx *
 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
+enum ice_status ice_sbq_rw_reg_lp(struct ice_hw *hw,
+				  struct ice_sbq_msg_input *in, bool lock);
+void ice_sbq_lock(struct ice_hw *hw);
+void ice_sbq_unlock(struct ice_hw *hw);
+enum ice_status ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
 void
 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
 		  u64 *prev_stat, u64 *cur_stat);
@@ -226,6 +235,8 @@ ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
 		     struct ice_eth_stats *cur_stats);
 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
 void ice_print_rollback_msg(struct ice_hw *hw);
+bool ice_is_generic_mac(struct ice_hw *hw);
+bool ice_is_e810(struct ice_hw *hw);
 enum ice_status
 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
 		     struct ice_aqc_txsched_elem_data *buf);
diff --git a/drivers/net/ice/base/ice_controlq.c b/drivers/net/ice/base/ice_controlq.c
index 93f7bc0..cdd067c 100644
--- a/drivers/net/ice/base/ice_controlq.c
+++ b/drivers/net/ice/base/ice_controlq.c
@@ -55,6 +55,21 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
 }
 
 /**
+ * ice_sb_init_regs - Initialize Sideband registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_sb_init_regs(struct ice_hw *hw)
+{
+	struct ice_ctl_q_info *cq = &hw->sbq;
+
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+	ICE_CQ_INIT_REGS(cq, PF_SB);
+}
+
+/**
  * ice_check_sq_alive
  * @hw: pointer to the HW struct
  * @cq: pointer to the specific Control queue
@@ -584,6 +599,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 		ice_adminq_init_regs(hw);
 		cq = &hw->adminq;
 		break;
+	case ICE_CTL_Q_SB:
+		ice_sb_init_regs(hw);
+		cq = &hw->sbq;
+		break;
 	case ICE_CTL_Q_MAILBOX:
 		ice_mailbox_init_regs(hw);
 		cq = &hw->mailboxq;
@@ -621,6 +640,18 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 }
 
 /**
+ * ice_is_sbq_supported - is the sideband queue supported
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if the sideband control queue interface is
+ * supported for the device, false otherwise
+ */
+static bool ice_is_sbq_supported(struct ice_hw *hw)
+{
+	return ice_is_generic_mac(hw);
+}
+
+/**
  * ice_shutdown_ctrlq - shutdown routine for any control queue
  * @hw: pointer to the hardware structure
  * @q_type: specific Control queue type
@@ -639,6 +670,9 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 		if (ice_check_sq_alive(hw, cq))
 			ice_aq_q_shutdown(hw, true);
 		break;
+	case ICE_CTL_Q_SB:
+		cq = &hw->sbq;
+		break;
 	case ICE_CTL_Q_MAILBOX:
 		cq = &hw->mailboxq;
 		break;
@@ -663,6 +697,9 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
 	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 	/* Shutdown FW admin queue */
 	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+	/* Shutdown PHY Sideband */
+	if (ice_is_sbq_supported(hw))
+		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
 	/* Shutdown PF-VF Mailbox */
 	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 }
@@ -704,6 +741,15 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
 
 	if (status)
 		return status;
+	/* sideband control queue (SBQ) interface is not supported on some
+	 * devices. Initialize if supported, else fallback to the admin queue
+	 * interface
+	 */
+	if (ice_is_sbq_supported(hw)) {
+		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
+		if (status)
+			return status;
+	}
 	/* Init Mailbox queue */
 	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
 }
@@ -739,6 +785,8 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
 enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
 {
 	ice_init_ctrlq_locks(&hw->adminq);
+	if (ice_is_sbq_supported(hw))
+		ice_init_ctrlq_locks(&hw->sbq);
 	ice_init_ctrlq_locks(&hw->mailboxq);
 
 	return ice_init_all_ctrlq(hw);
@@ -771,6 +819,8 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw)
 	ice_shutdown_all_ctrlq(hw);
 
 	ice_destroy_ctrlq_locks(&hw->adminq);
+	if (ice_is_sbq_supported(hw))
+		ice_destroy_ctrlq_locks(&hw->sbq);
 	ice_destroy_ctrlq_locks(&hw->mailboxq);
 }
 
@@ -882,7 +932,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  * This is the main send command routine for the ATQ. It runs the queue,
  * cleans the queue, etc.
  */
-static enum ice_status
+enum ice_status
 ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
 		       struct ice_sq_cd *cd)
diff --git a/drivers/net/ice/base/ice_controlq.h b/drivers/net/ice/base/ice_controlq.h
index 0d54e71..840fb5e 100644
--- a/drivers/net/ice/base/ice_controlq.h
+++ b/drivers/net/ice/base/ice_controlq.h
@@ -10,6 +10,7 @@
 /* Maximum buffer lengths for all control queue types */
 #define ICE_AQ_MAX_BUF_LEN 4096
 #define ICE_MBXQ_MAX_BUF_LEN 4096
+#define ICE_SBQ_MAX_BUF_LEN 512
 
 #define ICE_CTL_Q_DESC(R, i) \
 	(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -30,6 +31,7 @@ enum ice_ctl_q {
 	ICE_CTL_Q_UNKNOWN = 0,
 	ICE_CTL_Q_ADMIN,
 	ICE_CTL_Q_MAILBOX,
+	ICE_CTL_Q_SB,
 };
 
 /* Control Queue timeout settings - max delay 1s */
diff --git a/drivers/net/ice/base/ice_ptp_consts.h b/drivers/net/ice/base/ice_ptp_consts.h
new file mode 100644
index 0000000..2bd338c
--- /dev/null
+++ b/drivers/net/ice/base/ice_ptp_consts.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _ICE_PTP_CONSTS_H_
+#define _ICE_PTP_CONSTS_H_
+
+/* Constant definitions related to the hardware clock used for PTP 1588
+ * features and functionality.
+ */
+/* Constants defined for the PTP 1588 clock hardware. */
+
+/*
+ * struct ice_time_ref_info_e822
+ *
+ * E822 hardware can use different sources as the reference for the PTP
+ * hardware clock. Each clock has different characteristics such as a slightly
+ * different frequency, etc.
+ *
+ * This lookup table defines several constants that depend on the current time
+ * reference. See the struct ice_time_ref_info_e822 for information about the
+ * meaning of each constant.
+ */
+const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
+	/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+	{
+		/* pll_freq */
+		823437500, /* 823.4375 MHz PLL */
+		/* nominal_incval */
+		0x136e44fabULL,
+		/* pps_delay */
+		11,
+	},
+
+	/* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+	{
+		/* pll_freq */
+		783360000, /* 783.36 MHz */
+		/* nominal_incval */
+		0x146cc2177ULL,
+		/* pps_delay */
+		12,
+	},
+
+	/* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+	{
+		/* pll_freq */
+		796875000, /* 796.875 MHz */
+		/* nominal_incval */
+		0x141414141ULL,
+		/* pps_delay */
+		12,
+	},
+
+	/* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+	{
+		/* pll_freq */
+		816000000, /* 816 MHz */
+		/* nominal_incval */
+		0x139b9b9baULL,
+		/* pps_delay */
+		12,
+	},
+
+	/* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+	{
+		/* pll_freq */
+		830078125, /* 830.078125 MHz */
+		/* nominal_incval */
+		0x134679aceULL,
+		/* pps_delay */
+		11,
+	},
+
+	/* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+	{
+		/* pll_freq */
+		783360000, /* 783.36 MHz */
+		/* nominal_incval */
+		0x146cc2177ULL,
+		/* pps_delay */
+		12,
+	},
+};
+
+#endif /* _ICE_PTP_CONSTS_H_ */
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
new file mode 100644
index 0000000..8aefcf9
--- /dev/null
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -0,0 +1,2023 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#include "ice_type.h"
+#include "ice_common.h"
+#include "ice_ptp_hw.h"
+#include "ice_ptp_consts.h"
+
+
+/* Low level functions for interacting with and managing the device clock used
+ * for the Precision Time Protocol.
+ *
+ * The ice hardware represents the current time using three registers:
+ *
+ *    GLTSYN_TIME_H     GLTSYN_TIME_L     GLTSYN_TIME_R
+ *  +---------------+ +---------------+ +---------------+
+ *  |    32 bits    | |    32 bits    | |    32 bits    |
+ *  +---------------+ +---------------+ +---------------+
+ *
+ * The registers are incremented every clock tick using a 40bit increment
+ * value defined over two registers:
+ *
+ *                     GLTSYN_INCVAL_H   GLTSYN_INCVAL_L
+ *                    +---------------+ +---------------+
+ *                    |     8 bits    | |    32 bits    |
+ *                    +---------------+ +---------------+
+ *
+ * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
+ * registers every clock source tick. Depending on the specific device
+ * configuration, the clock source frequency could be one of a number of
+ * values.
+ *
+ * For E810 devices, the increment frequency is 812.5 MHz
+ *
+ * For E822 devices the clock can be derived from different sources, and the
+ * increment has an effective frequency of one of the following:
+ * - 823.4375 MHz
+ * - 783.36 MHz
+ * - 796.875 MHz
+ * - 816 MHz
+ * - 830.078125 MHz
+ * - 783.36 MHz
+ *
+ * The hardware captures timestamps in the PHY for incoming packets, and for
+ * outgoing packets on request. To support this, the PHY maintains a timer
+ * that matches the lower 64 bits of the global source timer.
+ *
+ * In order to ensure that the PHY timers and the source timer are equivalent,
+ * shadow registers are used to prepare the desired initial values. A special
+ * sync command is issued to trigger copying from the shadow registers into
+ * the appropriate source and PHY registers simultaneously.
+ *
+ * The driver supports devices which have different PHYs with subtly different
+ * mechanisms to program and control the timers. We divide the devices into
+ * families named after the first major device, E810 and similar devices, and
+ * E822 and similar devices.
+ *
+ * - E822 based devices have additional support for fine grained Vernier
+ *   calibration which requires significant setup
+ * - The layout of timestamp data in the PHY register blocks is different
+ * - The way timer synchronization commands are issued is different.
+ *
+ * To support this, very low level functions have an e810 or e822 suffix
+ * indicating what type of device they work on. Higher level abstractions for
+ * tasks that can be done on both devices do not have the suffix and will
+ * correctly look up the appropriate low level function when running.
+ *
+ * Functions which only make sense on a single device family may not have
+ * a suitable generic implementation.
+ */
+
+/**
+ * ice_get_ptp_src_clock_index - determine source clock index
+ * @hw: pointer to HW struct
+ *
+ * Determine the source clock index currently in use, based on device
+ * capabilities reported during initialization.
+ */
+u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
+{
+	return hw->func_caps.ts_func_info.tmr_index_assoc;
+}
+
+/**
+ * ice_ptp_read_src_incval - Read source timer increment value
+ * @hw: pointer to HW struct
+ *
+ * Read the increment value of the source timer and return it.
+ */
+u64 ice_ptp_read_src_incval(struct ice_hw *hw)
+{
+	u32 lo, hi;
+	u8 tmr_idx;
+
+	tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+	lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+	hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+
+	return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
+}
+
+/**
+ * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
+ * @hw: pointer to HW struct
+ *
+ * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
+ * write immediately. This triggers the hardware to begin executing all of the
+ * source and PHY timer commands synchronously.
+ */
+static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
+{
+	wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
+	ice_flush(hw);
+}
+
+/* E822 family functions
+ *
+ * The following functions operate on the E822 family of devices.
+ */
+
+/**
+ * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
+ * @msg: the PHY message buffer to fill in
+ * @port: the port to access
+ * @offset: the register offset
+ */
+static void
+ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+{
+	int phy_port, phy, quadtype;
+
+	phy_port = port % ICE_PORTS_PER_PHY;
+	phy = port / ICE_PORTS_PER_PHY;
+	quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
+
+	if (quadtype == 0) {
+		msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
+		msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
+	} else {
+		msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
+		msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
+	}
+
+	if (phy == 0)
+		msg->dest_dev = rmn_0;
+	else if (phy == 1)
+		msg->dest_dev = rmn_1;
+	else
+		msg->dest_dev = rmn_2;
+}
+
+/**
+ * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 64bit register
+ *
+ * Checks if the provided low address is one of the known 64bit PHY values
+ * represented as two 32bit registers. If it is, return the appropriate high
+ * register offset to use.
+ */
+static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+{
+	switch (low_addr) {
+	case P_REG_PAR_PCS_TX_OFFSET_L:
+		*high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
+		return true;
+	case P_REG_PAR_PCS_RX_OFFSET_L:
+		*high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
+		return true;
+	case P_REG_PAR_TX_TIME_L:
+		*high_addr = P_REG_PAR_TX_TIME_U;
+		return true;
+	case P_REG_PAR_RX_TIME_L:
+		*high_addr = P_REG_PAR_RX_TIME_U;
+		return true;
+	case P_REG_TOTAL_TX_OFFSET_L:
+		*high_addr = P_REG_TOTAL_TX_OFFSET_U;
+		return true;
+	case P_REG_TOTAL_RX_OFFSET_L:
+		*high_addr = P_REG_TOTAL_RX_OFFSET_U;
+		return true;
+	case P_REG_UIX66_10G_40G_L:
+		*high_addr = P_REG_UIX66_10G_40G_U;
+		return true;
+	case P_REG_UIX66_25G_100G_L:
+		*high_addr = P_REG_UIX66_25G_100G_U;
+		return true;
+	case P_REG_TX_CAPTURE_L:
+		*high_addr = P_REG_TX_CAPTURE_U;
+		return true;
+	case P_REG_RX_CAPTURE_L:
+		*high_addr = P_REG_RX_CAPTURE_U;
+		return true;
+	case P_REG_TX_TIMER_INC_PRE_L:
+		*high_addr = P_REG_TX_TIMER_INC_PRE_U;
+		return true;
+	case P_REG_RX_TIMER_INC_PRE_L:
+		*high_addr = P_REG_RX_TIMER_INC_PRE_U;
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**
+ * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 40bit value
+ *
+ * Checks if the provided low address is one of the known 40bit PHY values
+ * split into two registers with the lower 8 bits in the low register and the
+ * upper 32 bits in the high register. If it is, return the appropriate high
+ * register offset to use.
+ */
+static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+{
+	switch (low_addr) {
+	case P_REG_TIMETUS_L:
+		*high_addr = P_REG_TIMETUS_U;
+		return true;
+	case P_REG_PAR_RX_TUS_L:
+		*high_addr = P_REG_PAR_RX_TUS_U;
+		return true;
+	case P_REG_PAR_TX_TUS_L:
+		*high_addr = P_REG_PAR_TX_TUS_U;
+		return true;
+	case P_REG_PCS_RX_TUS_L:
+		*high_addr = P_REG_PCS_RX_TUS_U;
+		return true;
+	case P_REG_PCS_TX_TUS_L:
+		*high_addr = P_REG_PCS_TX_TUS_U;
+		return true;
+	case P_REG_DESK_PAR_RX_TUS_L:
+		*high_addr = P_REG_DESK_PAR_RX_TUS_U;
+		return true;
+	case P_REG_DESK_PAR_TX_TUS_L:
+		*high_addr = P_REG_DESK_PAR_TX_TUS_U;
+		return true;
+	case P_REG_DESK_PCS_RX_TUS_L:
+		*high_addr = P_REG_DESK_PCS_RX_TUS_U;
+		return true;
+	case P_REG_DESK_PCS_TX_TUS_L:
+		*high_addr = P_REG_DESK_PCS_TX_TUS_U;
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**
+ * ice_read_phy_reg_e822_lp - Read a PHY register
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @offset: PHY register offset to read
+ * @val: on return, the contents read from the PHY
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Read a PHY register for the given port over the device sideband queue.
+ */
+static enum ice_status
+ice_read_phy_reg_e822_lp(struct ice_hw *hw, u8 port, u16 offset, u32 *val,
+			 bool lock_sbq)
+{
+	struct ice_sbq_msg_input msg = {0};
+	enum ice_status status;
+
+	ice_fill_phy_msg_e822(&msg, port, offset);
+	msg.opcode = ice_sbq_msg_rd;
+
+	status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
+			  status);
+		return status;
+	}
+
+	*val = msg.data;
+
+	return ICE_SUCCESS;
+}
+
+enum ice_status
+ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
+{
+	return ice_read_phy_reg_e822_lp(hw, port, offset, val, true);
+}
+
+/**
+ * ice_read_40b_phy_reg_e822 - Read a 40bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 40bit value from the PHY registers
+ *
+ * Reads the two registers associated with a 40bit value and returns it in the
+ * val pointer. The offset always specifies the lower register offset to use.
+ * The high offset is looked up. This function only operates on registers
+ * known to be split into a lower 8 bit chunk and an upper 32 bit chunk.
+ */
+static enum ice_status
+ice_read_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+{
+	enum ice_status status;
+	u32 low, high;
+	u16 high_addr;
+
+	/* Only operate on registers known to be split into two 32bit
+	 * registers.
+	 */
+	if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
+		ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
+			  low_addr);
+		return ICE_ERR_PARAM;
+	}
+
+	status = ice_read_phy_reg_e822(hw, port, low_addr, &low);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x, status %d\n",
+			  low_addr, status);
+		return status;
+	}
+
+	status = ice_read_phy_reg_e822(hw, port, high_addr, &high);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x, status %d\n",
+			  high_addr, status);
+		return status;
+	}
+
+	*val = (u64)high << P_REG_40B_HIGH_S | (low & P_REG_40B_LOW_M);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ *
+ * Reads the two registers associated with a 64bit value and returns it in the
+ * val pointer. The offset always specifies the lower register offset to use.
+ * The high offset is looked up. This function only operates on registers
+ * known to be two parts of a 64bit value.
+ */
+static enum ice_status
+ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+{
+	enum ice_status status;
+	u32 low, high;
+	u16 high_addr;
+
+	/* Only operate on registers known to be split into two 32bit
+	 * registers.
+	 */
+	if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+		ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+			  low_addr);
+		return ICE_ERR_PARAM;
+	}
+
+	status = ice_read_phy_reg_e822(hw, port, low_addr, &low);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x, status %d\n",
+			  low_addr, status);
+		return status;
+	}
+
+	status = ice_read_phy_reg_e822(hw, port, high_addr, &high);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x, status %d\n",
+			  high_addr, status);
+		return status;
+	}
+
+	*val = (u64)high << 32 | low;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_write_phy_reg_e822_lp - Write a PHY register
+ * @hw: pointer to the HW struct
+ * @port: PHY port to write to
+ * @offset: PHY register offset to write
+ * @val: The value to write to the register
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Write a PHY register for the given port over the device sideband queue.
+ */
+static enum ice_status
+ice_write_phy_reg_e822_lp(struct ice_hw *hw, u8 port, u16 offset, u32 val,
+			  bool lock_sbq)
+{
+	struct ice_sbq_msg_input msg = {0};
+	enum ice_status status;
+
+	ice_fill_phy_msg_e822(&msg, port, offset);
+	msg.opcode = ice_sbq_msg_wr;
+	msg.data = val;
+
+	status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
+			  status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+enum ice_status
+ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
+{
+	return ice_write_phy_reg_e822_lp(hw, port, offset, val, true);
+}
+
+/**
+ * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ *
+ * Write the provided 40b value to the two associated registers by splitting
+ * it up into two chunks, the lower 8 bits and the upper 32 bits.
+ */
+static enum ice_status
+ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+	enum ice_status status;
+	u32 low, high;
+	u16 high_addr;
+
+	/* Only operate on registers known to be split into a lower 8 bit
+	 * register and an upper 32 bit register.
+	 */
+	if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
+		ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
+			  low_addr);
+		return ICE_ERR_PARAM;
+	}
+
+	low = (u32)(val & P_REG_40B_LOW_M);
+	high = (u32)(val >> P_REG_40B_HIGH_S);
+
+	status = ice_write_phy_reg_e822(hw, port, low_addr, low);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, status %d\n",
+			  low_addr, status);
+		return status;
+	}
+
+	status = ice_write_phy_reg_e822(hw, port, high_addr, high);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, status %d\n",
+			  high_addr, status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
+ *
+ * Write the 64bit value to the two associated 32bit PHY registers. The offset
+ * is always specified as the lower register, and the high address is looked
+ * up. This function only operates on registers known to be two parts of
+ * a 64bit value.
+ */
+static enum ice_status
+ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+	enum ice_status status;
+	u32 low, high;
+	u16 high_addr;
+
+	/* Only operate on registers known to be split into two 32bit
+	 * registers.
+	 */
+	if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+		ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+			  low_addr);
+		return ICE_ERR_PARAM;
+	}
+
+	low = ICE_LO_DWORD(val);
+	high = ICE_HI_DWORD(val);
+
+	status = ice_write_phy_reg_e822(hw, port, low_addr, low);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, status %d\n",
+			  low_addr, status);
+		return status;
+	}
+
+	status = ice_write_phy_reg_e822(hw, port, high_addr, high);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, status %d\n",
+			  high_addr, status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_fill_quad_msg_e822 - Fill message data for quad register access
+ * @msg: the PHY message buffer to fill in
+ * @quad: the quad to access
+ * @offset: the register offset
+ *
+ * Fill a message buffer for accessing a register in a quad shared between
+ * multiple PHYs.
+ */
+static void
+ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+{
+	u32 addr;
+
+	msg->dest_dev = rmn_0;
+
+	if ((quad % ICE_NUM_QUAD_TYPE) == 0)
+		addr = Q_0_BASE + offset;
+	else
+		addr = Q_1_BASE + offset;
+
+	msg->msg_addr_low = ICE_LO_WORD(addr);
+	msg->msg_addr_high = ICE_HI_WORD(addr);
+}
+
+/**
+ * ice_read_quad_reg_e822_lp - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @quad: quad to read from
+ * @offset: quad register offset to read
+ * @val: on return, the contents read from the quad
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Read a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
+ */
+static enum ice_status
+ice_read_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 *val,
+			  bool lock_sbq)
+{
+	struct ice_sbq_msg_input msg = {0};
+	enum ice_status status;
+
+	if (quad >= ICE_MAX_QUAD)
+		return ICE_ERR_PARAM;
+
+	ice_fill_quad_msg_e822(&msg, quad, offset);
+	msg.opcode = ice_sbq_msg_rd;
+
+	status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
+			  status);
+		return status;
+	}
+
+	*val = msg.data;
+
+	return ICE_SUCCESS;
+}
+
+enum ice_status
+ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
+{
+	return ice_read_quad_reg_e822_lp(hw, quad, offset, val, true);
+}
+
+/**
+ * ice_write_quad_reg_e822_lp - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @quad: quad to write to
+ * @offset: quad register offset to write
+ * @val: The value to write to the register
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Write a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
+ */
+static enum ice_status
+ice_write_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 val,
+			   bool lock_sbq)
+{
+	struct ice_sbq_msg_input msg = {0};
+	enum ice_status status;
+
+	if (quad >= ICE_MAX_QUAD)
+		return ICE_ERR_PARAM;
+
+	ice_fill_quad_msg_e822(&msg, quad, offset);
+	msg.opcode = ice_sbq_msg_wr;
+	msg.data = val;
+
+	status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
+			  status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+enum ice_status
+ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
+{
+	return ice_write_quad_reg_e822_lp(hw, quad, offset, val, true);
+}
+
+/**
+ * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the two associated registers in the
+ * quad memory block that is shared between the internal PHYs of the E822
+ * family of devices.
+ */
+static enum ice_status
+ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
+{
+	enum ice_status status;
+	u16 lo_addr, hi_addr;
+	u32 lo, hi;
+
+	lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+	hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
+
+	status = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
+			  status);
+		return status;
+	}
+
+	status = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
+			  status);
+		return status;
+	}
+
+	/* For E822 based internal PHYs, the timestamp is reported with the
+	 * lower 8 bits in the low register, and the upper 32 bits in the high
+	 * register.
+	 */
+	*tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
+ * shared between the internal PHYs on the E822 devices.
+ */
+static enum ice_status
+ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
+{
+	enum ice_status status;
+	u16 lo_addr, hi_addr;
+
+	lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+	hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
+
+	status = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n",
+			  status);
+		return status;
+	}
+
+	status = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n",
+			  status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @time: Time to initialize the PHY port clocks to
+ *
+ * Program the PHY port registers with a new initial time value. The port
+ * clock will be initialized once the driver issues an INIT_TIME sync
+ * command. The time value is the upper 32 bits of the PHY timer, usually in
+ * units of nominal nanoseconds.
+ */
+static enum ice_status
+ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
+{
+	enum ice_status status;
+	u64 phy_time;
+	u8 port;
+
+	/* The time represents the upper 32 bits of the PHY timer, so we need
+	 * to shift to account for this when programming.
+	 */
+	phy_time = (u64)time << 32;
+
+	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+
+		/* Tx case */
+		status = ice_write_64b_phy_reg_e822(hw, port,
+						    P_REG_TX_TIMER_INC_PRE_L,
+						    phy_time);
+		if (status)
+			goto exit_err;
+
+		/* Rx case */
+		status = ice_write_64b_phy_reg_e822(hw, port,
+						    P_REG_RX_TIMER_INC_PRE_L,
+						    phy_time);
+		if (status)
+			goto exit_err;
+	}
+
+	return ICE_SUCCESS;
+
+exit_err:
+	ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, status %d\n",
+		  port, status);
+
+	return status;
+}
+
+/**
+ * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
+ * @hw: pointer to HW struct
+ * @port: Port number to be programmed
+ * @time: time in cycles to adjust the port Tx and Rx clocks
+ * @lock_sbq: true to lock the sbq sq_lock (the usual case); false if the
+ *            sq_lock has already been locked at a higher level
+ *
+ * Program the port for an atomic adjustment by writing the Tx and Rx timer
+ * registers. The atomic adjustment won't be completed until the driver issues
+ * an ADJ_TIME command.
+ *
+ * Note that time is not in units of nanoseconds. It is in clock time
+ * including the lower sub-nanosecond portion of the port timer.
+ *
+ * Negative adjustments are supported using 2s complement arithmetic.
+ */
+enum ice_status
+ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time,
+			   bool lock_sbq)
+{
+	enum ice_status status;
+	u32 l_time, u_time;
+
+	l_time = ICE_LO_DWORD(time);
+	u_time = ICE_HI_DWORD(time);
+
+	/* Tx case */
+	status = ice_write_phy_reg_e822_lp(hw, port, P_REG_TX_TIMER_INC_PRE_L,
+					   l_time, lock_sbq);
+	if (status)
+		goto exit_err;
+
+	status = ice_write_phy_reg_e822_lp(hw, port, P_REG_TX_TIMER_INC_PRE_U,
+					   u_time, lock_sbq);
+	if (status)
+		goto exit_err;
+
+	/* Rx case */
+	status = ice_write_phy_reg_e822_lp(hw, port, P_REG_RX_TIMER_INC_PRE_L,
+					   l_time, lock_sbq);
+	if (status)
+		goto exit_err;
+
+	status = ice_write_phy_reg_e822_lp(hw, port, P_REG_RX_TIMER_INC_PRE_U,
+					   u_time, lock_sbq);
+	if (status)
+		goto exit_err;
+
+	return ICE_SUCCESS;
+
+exit_err:
+	ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, status %d\n",
+		  port, status);
+	return status;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment in nanoseconds
+ * @lock_sbq: true to lock the sbq sq_lock (the usual case); false if the
+ *            sq_lock has already been locked at a higher level
+ *
+ * Prepare the PHY ports for an atomic time adjustment by programming the PHY
+ * Tx and Rx port registers. The actual adjustment is completed by issuing an
+ * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
+ */
+static enum ice_status
+ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj, bool lock_sbq)
+{
+	s64 cycles;
+	u8 port;
+
+	/* The port clock supports adjustment of the sub-nanosecond portion of
+	 * the clock. We shift the provided adjustment in nanoseconds to
+	 * calculate the appropriate adjustment to program into the PHY ports.
+	 */
+	if (adj > 0)
+		cycles = (s64)adj << 32;
+	else
+		cycles = -(((s64)-adj) << 32);
+
+	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+		enum ice_status status;
+
+		status = ice_ptp_prep_port_adj_e822(hw, port, cycles,
+						    lock_sbq);
+		if (status)
+			return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
+ * @hw: pointer to HW struct
+ * @incval: new increment value to prepare
+ *
+ * Prepare each of the PHY ports for a new increment value by programming the
+ * port's TIMETUS registers. The new increment value will be updated after
+ * issuing an INIT_INCVAL command.
+ */
+static enum ice_status
+ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
+{
+	enum ice_status status;
+	u8 port;
+
+	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+		status = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
+						    incval);
+		if (status)
+			goto exit_err;
+	}
+
+	return ICE_SUCCESS;
+
+exit_err:
+	ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, status %d\n",
+		  port, status);
+
+	return status;
+}
+
+/**
+ * ice_ptp_read_phy_incval_e822 - Read a PHY port's current incval
+ * @hw: pointer to the HW struct
+ * @port: the port to read
+ * @incval: on return, the time_clk_cyc incval for this port
+ *
+ * Read the time_clk_cyc increment value for a given PHY port.
+ */
+enum ice_status
+ice_ptp_read_phy_incval_e822(struct ice_hw *hw, u8 port, u64 *incval)
+{
+	enum ice_status status;
+
+	status = ice_read_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read TIMETUS_L, status %d\n",
+			  status);
+		return status;
+	}
+
+	ice_debug(hw, ICE_DBG_PTP, "read INCVAL = 0x%016llx\n",
+		  (unsigned long long)*incval);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_target_e822 - Prepare PHY for adjust at target time
+ * @hw: pointer to HW struct
+ * @target_time: target time to program
+ *
+ * Program the PHY port Tx and Rx TIMER_CNT_ADJ registers used for the
+ * ADJ_TIME_AT_TIME command. This should be used in conjunction with
+ * ice_ptp_prep_phy_adj_e822 to program an atomic adjustment that is
+ * delayed until a specified target time.
+ *
+ * Note that a target time adjustment is not currently supported on E810
+ * devices.
+ */
+static enum ice_status
+ice_ptp_prep_phy_adj_target_e822(struct ice_hw *hw, u32 target_time)
+{
+	enum ice_status status;
+	u8 port;
+
+	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+
+		/* Tx case */
+		/* No sub-nanoseconds data */
+		status = ice_write_phy_reg_e822_lp(hw, port,
+						   P_REG_TX_TIMER_CNT_ADJ_L,
+						   0, true);
+		if (status)
+			goto exit_err;
+
+		status = ice_write_phy_reg_e822_lp(hw, port,
+						   P_REG_TX_TIMER_CNT_ADJ_U,
+						   target_time, true);
+		if (status)
+			goto exit_err;
+
+		/* Rx case */
+		/* No sub-nanoseconds data */
+		status = ice_write_phy_reg_e822_lp(hw, port,
+						   P_REG_RX_TIMER_CNT_ADJ_L,
+						   0, true);
+		if (status)
+			goto exit_err;
+
+		status = ice_write_phy_reg_e822_lp(hw, port,
+						   P_REG_RX_TIMER_CNT_ADJ_U,
+						   target_time, true);
+		if (status)
+			goto exit_err;
+	}
+
+	return ICE_SUCCESS;
+
+exit_err:
+	ice_debug(hw, ICE_DBG_PTP, "Failed to write target time for port %u, status %d\n",
+		  port, status);
+
+	return status;
+}
+
+/**
+ * ice_ptp_read_port_capture - Read a port's local time capture
+ * @hw: pointer to HW struct
+ * @port: Port number to read
+ * @tx_ts: on return, the Tx port time capture
+ * @rx_ts: on return, the Rx port time capture
+ *
+ * Read the port's Tx and Rx local time capture values.
+ *
+ * Note this has no equivalent for the E810 devices.
+ */
+enum ice_status
+ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
+{
+	enum ice_status status;
+
+	/* Tx case */
+	status = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, status %d\n",
+			  status);
+		return status;
+	}
+
+	ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
+		  (unsigned long long)*tx_ts);
+
+	/* Rx case */
+	status = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, status %d\n",
+			  status);
+		return status;
+	}
+
+	ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
+		  (unsigned long long)*rx_ts);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to which cmd has to be sent
+ * @cmd: Command to be sent to the port
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Prepare the requested port for an upcoming timer sync command.
+ *
+ * Note there is no equivalent of this operation on E810, as that device
+ * always handles all external PHYs internally.
+ */
+enum ice_status
+ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd,
+		     bool lock_sbq)
+{
+	enum ice_status status;
+	u32 cmd_val, val;
+	u8 tmr_idx;
+
+	tmr_idx = ice_get_ptp_src_clock_index(hw);
+	cmd_val = tmr_idx << SEL_PHY_SRC;
+	switch (cmd) {
+	case INIT_TIME:
+		cmd_val |= PHY_CMD_INIT_TIME;
+		break;
+	case INIT_INCVAL:
+		cmd_val |= PHY_CMD_INIT_INCVAL;
+		break;
+	case ADJ_TIME:
+		cmd_val |= PHY_CMD_ADJ_TIME;
+		break;
+	case ADJ_TIME_AT_TIME:
+		cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
+		break;
+	case READ_TIME:
+		cmd_val |= PHY_CMD_READ_TIME;
+		break;
+	default:
+		ice_warn(hw, "Unknown timer command %u\n", cmd);
+		return ICE_ERR_PARAM;
+	}
+
+	/* Tx case */
+	/* Read, modify, write */
+	status = ice_read_phy_reg_e822_lp(hw, port, P_REG_TX_TMR_CMD, &val,
+					  lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, status %d\n",
+			  status);
+		return status;
+	}
+
+	/* Modify necessary bits only and perform write */
+	val &= ~TS_CMD_MASK;
+	val |= cmd_val;
+
+	status = ice_write_phy_reg_e822_lp(hw, port, P_REG_TX_TMR_CMD, val,
+					   lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, status %d\n",
+			  status);
+		return status;
+	}
+
+	/* Rx case */
+	/* Read, modify, write */
+	status = ice_read_phy_reg_e822_lp(hw, port, P_REG_RX_TMR_CMD, &val,
+					  lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, status %d\n",
+			  status);
+		return status;
+	}
+
+	/* Modify necessary bits only and perform write */
+	val &= ~TS_CMD_MASK;
+	val |= cmd_val;
+
+	status = ice_write_phy_reg_e822_lp(hw, port, P_REG_RX_TMR_CMD, val,
+					   lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, status %d\n",
+			  status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
+ * @hw: pointer to the HW struct
+ * @cmd: timer command to prepare
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Prepare all ports connected to this device for an upcoming timer sync
+ * command.
+ */
+static enum ice_status
+ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd,
+		      bool lock_sbq)
+{
+	u8 port;
+
+	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+		enum ice_status status;
+
+		status = ice_ptp_one_port_cmd(hw, port, cmd, lock_sbq);
+		if (status)
+			return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/* E822 Vernier calibration functions
+ *
+ * The following functions are used as part of the vernier calibration of
+ * a port. This calibration increases the precision of the timestamps on the
+ * port.
+ */
+
+/**
+ * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
+ * @hw: pointer to the HW struct
+ *
+ * Set the window length used for the vernier port calibration process.
+ */
+enum ice_status ice_ptp_set_vernier_wl(struct ice_hw *hw)
+{
+	u8 port;
+
+	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+		enum ice_status status;
+
+		status = ice_write_phy_reg_e822_lp(hw, port, P_REG_WL,
+						   PTP_VERNIER_WL, true);
+		if (status) {
+			ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, status %d\n",
+				  port, status);
+			return status;
+		}
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
+ * @hw: pointer to HW struct
+ * @port: the port to read from
+ * @link_out: if non-NULL, holds link speed on success
+ * @fec_out: if non-NULL, holds FEC algorithm on success
+ *
+ * Read the serdes data for the PHY port and extract the link speed and FEC
+ * algorithm.
+ */
+enum ice_status
+ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
+			       enum ice_ptp_link_spd *link_out,
+			       enum ice_ptp_fec_mode *fec_out)
+{
+	enum ice_ptp_link_spd link;
+	enum ice_ptp_fec_mode fec;
+	enum ice_status status;
+	u32 serdes;
+
+	status = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
+		return status;
+	}
+
+	/* Determine the FEC algorithm */
+	fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
+
+	serdes &= P_REG_LINK_SPEED_SERDES_M;
+
+	/* Determine the link speed */
+	if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
+		switch (serdes) {
+		case ICE_PTP_SERDES_25G:
+			link = ICE_PTP_LNK_SPD_25G_RS;
+			break;
+		case ICE_PTP_SERDES_50G:
+			link = ICE_PTP_LNK_SPD_50G_RS;
+			break;
+		case ICE_PTP_SERDES_100G:
+			link = ICE_PTP_LNK_SPD_100G_RS;
+			break;
+		default:
+			return ICE_ERR_OUT_OF_RANGE;
+		}
+	} else {
+		switch (serdes) {
+		case ICE_PTP_SERDES_1G:
+			link = ICE_PTP_LNK_SPD_1G;
+			break;
+		case ICE_PTP_SERDES_10G:
+			link = ICE_PTP_LNK_SPD_10G;
+			break;
+		case ICE_PTP_SERDES_25G:
+			link = ICE_PTP_LNK_SPD_25G;
+			break;
+		case ICE_PTP_SERDES_40G:
+			link = ICE_PTP_LNK_SPD_40G;
+			break;
+		case ICE_PTP_SERDES_50G:
+			link = ICE_PTP_LNK_SPD_50G;
+			break;
+		default:
+			return ICE_ERR_OUT_OF_RANGE;
+		}
+	}
+
+	if (link_out)
+		*link_out = link;
+	if (fec_out)
+		*fec_out = fec;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
+ * @hw: pointer to HW struct
+ * @port: the port to configure the quad for
+ */
+void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
+{
+	enum ice_ptp_link_spd link_spd;
+	enum ice_status status;
+	u32 val;
+	u8 quad;
+
+	status = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, status %d\n",
+			  status);
+		return;
+	}
+
+	quad = port / ICE_PORTS_PER_QUAD;
+
+	status = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, status %d\n",
+			  status);
+		return;
+	}
+
+	if (link_spd >= ICE_PTP_LNK_SPD_40G)
+		val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+	else
+		val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+
+	status = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, status %d\n",
+			  status);
+		return;
+	}
+}
+
+/* E810 functions
+ *
+ * The following functions operate on the E810 series devices which use
+ * a separate external PHY.
+ */
+
+/**
+ * ice_read_phy_reg_e810_lp - Read register from external PHY on E810
+ * @hw: pointer to the HW struct
+ * @addr: the address to read from
+ * @val: On return, the value read from the PHY
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Read a register from the external PHY on the E810 device.
+ */
+static enum ice_status
+ice_read_phy_reg_e810_lp(struct ice_hw *hw, u32 addr, u32 *val, bool lock_sbq)
+{
+	struct ice_sbq_msg_input msg = {0};
+	enum ice_status status;
+
+	msg.msg_addr_low = ICE_LO_WORD(addr);
+	msg.msg_addr_high = ICE_HI_WORD(addr);
+	msg.opcode = ice_sbq_msg_rd;
+	msg.dest_dev = rmn_0;
+
+	status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
+			  status);
+		return status;
+	}
+
+	*val = msg.data;
+
+	return ICE_SUCCESS;
+}
+
+static enum ice_status
+ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
+{
+	return ice_read_phy_reg_e810_lp(hw, addr, val, true);
+}
+
+/**
+ * ice_write_phy_reg_e810_lp - Write register on external PHY on E810
+ * @hw: pointer to the HW struct
+ * @addr: the address to write to
+ * @val: the value to write to the PHY
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Write a value to a register of the external PHY on the E810 device.
+ */
+static enum ice_status
+ice_write_phy_reg_e810_lp(struct ice_hw *hw, u32 addr, u32 val, bool lock_sbq)
+{
+	struct ice_sbq_msg_input msg = {0};
+	enum ice_status status;
+
+	msg.msg_addr_low = ICE_LO_WORD(addr);
+	msg.msg_addr_high = ICE_HI_WORD(addr);
+	msg.opcode = ice_sbq_msg_wr;
+	msg.dest_dev = rmn_0;
+	msg.data = val;
+
+	status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
+			  status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+static enum ice_status
+ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
+{
+	return ice_write_phy_reg_e810_lp(hw, addr, val, true);
+}
+
+/**
+ * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E810 device.
+ */
+static enum ice_status
+ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
+{
+	enum ice_status status;
+	u32 lo_addr, hi_addr, lo, hi;
+
+	lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
+	hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+
+	status = ice_read_phy_reg_e810(hw, lo_addr, &lo);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
+			  status);
+		return status;
+	}
+
+	status = ice_read_phy_reg_e810(hw, hi_addr, &hi);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
+			  status);
+		return status;
+	}
+
+	/* For E810 devices, the timestamp is reported with the lower 32 bits
+	 * in the low register, and the upper 8 bits in the high register.
+	 */
+	*tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
+
+	return ICE_SUCCESS;
+}
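
For concreteness, a hedged worked example of the 40-bit composition above (illustrative arithmetic, not taken from the patch):

/*
 * Example: hi = 0x5A, lo = 0x0001E240
 *
 *   tstamp = ((u64)0x5A << TS_HIGH_S) | (0x0001E240 & TS_LOW_M)
 *          = 0x5A0001E240
 */
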
+
+/**
+ * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp, resetting its valid bit, from the timestamp block of the
+ * external PHY on the E810 device.
+ */
+static enum ice_status
+ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
+{
+	enum ice_status status;
+	u32 lo_addr, hi_addr;
+
+	lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
+	hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+
+	status = ice_write_phy_reg_e810(hw, lo_addr, 0);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n",
+			  status);
+		return status;
+	}
+
+	status = ice_write_phy_reg_e810(hw, hi_addr, 0);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n",
+			  status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
+ * @hw: pointer to HW struct
+ *
+ * Enable the timesync PTP functionality for the external PHY connected to
+ * this function.
+ *
+ * Note there is no equivalent function needed on E822 based devices.
+ */
+enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw)
+{
+	enum ice_status status;
+	u8 tmr_idx;
+
+	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+	status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
+					GLTSYN_ENA_TSYN_ENA_M);
+	if (status)
+		ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
+			  status);
+
+	return status;
+}
+
+/**
+ * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
+ * @hw: Board private structure
+ * @time: Time to initialize the PHY port clock to
+ *
+ * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting the
+ * initial clock time. The time will not actually be programmed until the
+ * driver issues an INIT_TIME command.
+ *
+ * The time value is the upper 32 bits of the PHY timer, usually in units of
+ * nominal nanoseconds.
+ */
+static enum ice_status ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
+{
+	enum ice_status status;
+	u8 tmr_idx;
+
+	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+	status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, status %d\n",
+			  status);
+		return status;
+	}
+
+	status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, status %d\n",
+			  status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment value to program
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Prepare the PHY port for an atomic adjustment by programming the PHY
+ * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
+ * is completed by issuing an ADJ_TIME sync command.
+ *
+ * The adjustment value only contains the portion used for the upper 32bits of
+ * the PHY timer, usually in units of nominal nanoseconds. Negative
+ * adjustments are supported using 2's complement arithmetic.
+ */
+static enum ice_status
+ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj, bool lock_sbq)
+{
+	enum ice_status status;
+	u8 tmr_idx;
+
+	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+	/* Adjustments are represented as signed 2's complement values in
+	 * nanoseconds. Sub-nanosecond adjustment is not supported.
+	 */
+	status = ice_write_phy_reg_e810_lp(hw, ETH_GLTSYN_SHADJ_L(tmr_idx),
+					   0, lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, status %d\n",
+			  status);
+		return status;
+	}
+
+	status = ice_write_phy_reg_e810_lp(hw, ETH_GLTSYN_SHADJ_H(tmr_idx),
+					   adj, lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, status %d\n",
+			  status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Prepare the PHY port for a new increment value by programming the PHY
+ * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
+ * completed by issuing an INIT_INCVAL command.
+ */
+static enum ice_status
+ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
+{
+	enum ice_status status;
+	u32 high, low;
+	u8 tmr_idx;
+
+	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+	low = ICE_LO_DWORD(incval);
+	high = ICE_HI_DWORD(incval);
+
+	status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, status %d\n",
+			  status);
+		return status;
+	}
+
+	status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, status %d\n",
+			  status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_target_e810 - Prepare PHY port with adjust target
+ * @hw: Board private structure
+ * @target_time: Time to trigger the clock adjustment at
+ *
+ * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for
+ * a target time adjust, which will trigger an adjustment of the clock in the
+ * future. The actual adjustment will occur the next time the PHY port timer
+ * crosses over the provided value after the driver issues an ADJ_TIME_AT_TIME
+ * command.
+ *
+ * The time value is the upper 32 bits of the PHY timer, usually in units of
+ * nominal nanoseconds.
+ */
+static enum ice_status
+ice_ptp_prep_phy_adj_target_e810(struct ice_hw *hw, u32 target_time)
+{
+	enum ice_status status;
+	u8 tmr_idx;
+
+	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+	status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write target time to SHTIME_0, status %d\n",
+			  status);
+		return status;
+	}
+
+	status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx),
+					target_time);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write target time to SHTIME_L, status %d\n",
+			  status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
+ * @hw: pointer to HW struct
+ * @cmd: Command to be sent to the port
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Prepare the external PHYs connected to this device for a timer sync
+ * command.
+ */
+static enum ice_status
+ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd,
+		      bool lock_sbq)
+{
+	enum ice_status status;
+	u32 cmd_val, val;
+
+	switch (cmd) {
+	case INIT_TIME:
+		cmd_val = GLTSYN_CMD_INIT_TIME;
+		break;
+	case INIT_INCVAL:
+		cmd_val = GLTSYN_CMD_INIT_INCVAL;
+		break;
+	case ADJ_TIME:
+		cmd_val = GLTSYN_CMD_ADJ_TIME;
+		break;
+	case ADJ_TIME_AT_TIME:
+		cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
+		break;
+	case READ_TIME:
+		cmd_val = GLTSYN_CMD_READ_TIME;
+		break;
+	default:
+		ice_warn(hw, "Unknown timer command %u\n", cmd);
+		return ICE_ERR_PARAM;
+	}
+
+	/* Read, modify, write */
+	status = ice_read_phy_reg_e810_lp(hw, ETH_GLTSYN_CMD, &val, lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, status %d\n",
+			  status);
+		return status;
+	}
+
+	/* Modify necessary bits only and perform write */
+	val &= ~TS_CMD_MASK_E810;
+	val |= cmd_val;
+
+	status = ice_write_phy_reg_e810_lp(hw, ETH_GLTSYN_CMD, val, lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, status %d\n",
+			  status);
+		return status;
+	}
+
+	return ICE_SUCCESS;
+}
+
+/* Device agnostic functions
+ *
+ * The following functions implement shared behavior common to both E822 and
+ * E810 devices, possibly calling a device specific implementation where
+ * necessary.
+ */
+
+/**
+ * ice_ptp_lock - Acquire PTP global semaphore register lock
+ * @hw: pointer to the HW struct
+ *
+ * Acquire the global PTP hardware semaphore lock. Returns true if the lock
+ * was acquired, false otherwise.
+ *
+ * The PFTSYN_SEM register sets the busy bit on read, returning the previous
+ * value. If software sees the busy bit cleared, this means that this function
+ * acquired the lock (and the busy bit is now set). If software sees the busy
+ * bit set, it means that another function acquired the lock.
+ *
+ * Software must clear the busy bit with a write to release the lock for other
+ * functions when done.
+ */
+bool ice_ptp_lock(struct ice_hw *hw)
+{
+	u32 hw_lock;
+	int i;
+
+#define MAX_TRIES 5
+
+	for (i = 0; i < MAX_TRIES; i++) {
+		hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+		hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
+		if (hw_lock) {
+			/* Somebody is holding the lock */
+			ice_msec_delay(10, true);
+			continue;
+		} else {
+			break;
+		}
+	}
+
+	return !hw_lock;
+}
+
+/**
+ * ice_ptp_unlock - Release PTP global semaphore register lock
+ * @hw: pointer to the HW struct
+ *
+ * Release the global PTP hardware semaphore lock. This is done by writing to
+ * the PFTSYN_SEM register.
+ */
+void ice_ptp_unlock(struct ice_hw *hw)
+{
+	wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
+}
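
As a usage sketch (illustrative only, not part of the patch), callers are expected to bracket any shadow-register sequence with this semaphore, in the same way ice_ptp_write_incval_locked() later in this file does; "ns" here is a hypothetical caller-supplied time value:

	/* Hypothetical caller: serialize shadow-register programming with the
	 * global PTP semaphore; "ns" is assumed to be provided by the caller.
	 */
	if (!ice_ptp_lock(hw))
		return ICE_ERR_NOT_READY;

	status = ice_ptp_init_time(hw, ns);

	ice_ptp_unlock(hw);
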
+
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+	u32 cmd_val;
+	u8 tmr_idx;
+
+	tmr_idx = ice_get_ptp_src_clock_index(hw);
+	cmd_val = tmr_idx << SEL_CPK_SRC;
+
+	switch (cmd) {
+	case INIT_TIME:
+		cmd_val |= GLTSYN_CMD_INIT_TIME;
+		break;
+	case INIT_INCVAL:
+		cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+		break;
+	case ADJ_TIME:
+		cmd_val |= GLTSYN_CMD_ADJ_TIME;
+		break;
+	case ADJ_TIME_AT_TIME:
+		cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+		break;
+	case READ_TIME:
+		cmd_val |= GLTSYN_CMD_READ_TIME;
+		break;
+	default:
+		ice_warn(hw, "Unknown timer command %u\n", cmd);
+		return;
+	}
+
+	wr32(hw, GLTSYN_CMD, cmd_val);
+}
+
+/**
+ * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
+ * @hw: pointer to HW struct
+ * @cmd: the command to issue
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Prepare the source timer and PHY timers and then trigger the requested
+ * command. This causes the shadow registers previously written in preparation
+ * for the command to be synchronously applied to both the source and PHY
+ * timers.
+ */
+static enum ice_status
+ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd, bool lock_sbq)
+{
+	enum ice_status status;
+
+	/* First, prepare the source timer */
+	ice_ptp_src_cmd(hw, cmd);
+
+	/* Next, prepare the ports */
+	if (ice_is_e810(hw))
+		status = ice_ptp_port_cmd_e810(hw, cmd, lock_sbq);
+	else
+		status = ice_ptp_port_cmd_e822(hw, cmd, lock_sbq);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, status %d\n",
+			  cmd, status);
+		return status;
+	}
+
+	/* Write the sync command register to drive both source and PHY timer
+	 * commands synchronously
+	 */
+	ice_ptp_exec_tmr_cmd(hw);
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_init_time - Initialize device time to provided value
+ * @hw: pointer to HW struct
+ * @time: 64 bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
+ *
+ * Initialize the device clock to the specified time. This requires a
+ * three-step process:
+ *
+ * 1) write the new init time to the source timer shadow registers
+ * 2) write the new init time to the PHY timer shadow registers
+ * 3) issue an INIT_TIME timer command to synchronously switch both the source
+ *    and port timers to the new init time value at the next clock cycle.
+ */
+enum ice_status ice_ptp_init_time(struct ice_hw *hw, u64 time)
+{
+	enum ice_status status;
+	u8 tmr_idx;
+
+	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+	/* Source timers */
+	wr32(hw, GLTSYN_SHTIME_L(tmr_idx), ICE_LO_DWORD(time));
+	wr32(hw, GLTSYN_SHTIME_H(tmr_idx), ICE_HI_DWORD(time));
+	wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
+
+	/* PHY Clks */
+	/* Fill Rx and Tx ports and send msg to PHY */
+	if (ice_is_e810(hw))
+		status = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
+	else
+		status = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+	if (status)
+		return status;
+
+	return ice_ptp_tmr_cmd(hw, INIT_TIME, true);
+}
+
+/**
+ * ice_ptp_write_incval - Program PHC with new increment value
+ * @hw: pointer to HW struct
+ * @incval: Source timer increment value per clock cycle
+ *
+ * Program the PHC with a new increment value. This requires a three-step
+ * process:
+ *
+ * 1) Write the increment value to the source timer shadow registers
+ * 2) Write the increment value to the PHY timer shadow registers
+ * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
+ *    source and port timers to the new increment value at the next clock
+ *    cycle.
+ */
+enum ice_status ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
+{
+	enum ice_status status;
+	u8 tmr_idx;
+
+	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+	/* Shadow Adjust */
+	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), ICE_LO_DWORD(incval));
+	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), ICE_HI_DWORD(incval));
+
+	if (ice_is_e810(hw))
+		status = ice_ptp_prep_phy_incval_e810(hw, incval);
+	else
+		status = ice_ptp_prep_phy_incval_e822(hw, incval);
+	if (status)
+		return status;
+
+	return ice_ptp_tmr_cmd(hw, INIT_INCVAL, true);
+}
+
+/**
+ * ice_ptp_write_incval_locked - Program new incval while holding semaphore
+ * @hw: pointer to HW struct
+ * @incval: Source timer increment value per clock cycle
+ *
+ * Program a new PHC incval while holding the PTP semaphore.
+ */
+enum ice_status ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
+{
+	enum ice_status status;
+
+	if (!ice_ptp_lock(hw))
+		return ICE_ERR_NOT_READY;
+
+	status = ice_ptp_write_incval(hw, incval);
+
+	ice_ptp_unlock(hw);
+
+	return status;
+}
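
A hedged usage sketch (not part of the patch): pick the nominal increment value for the detected device family and program it through the locked variant. The cast of time_ref is an assumption about how the capability field is stored:

	/* Illustrative only: choose the nominal incval per device family and
	 * program it while holding the PTP semaphore.
	 */
	u64 incval;

	if (ice_is_e810(hw))
		incval = ICE_PTP_NOMINAL_INCVAL_E810;
	else
		incval = ice_e822_nominal_incval((enum ice_time_ref_freq)
						 hw->func_caps.ts_func_info.time_ref);

	status = ice_ptp_write_incval_locked(hw, incval);
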
+
+/**
+ * ice_ptp_adj_clock - Adjust PHC clock time atomically
+ * @hw: pointer to HW struct
+ * @adj: Adjustment in nanoseconds
+ * @lock_sbq: true to lock the sbq sq_lock (the usual case); false if the
+ *            sq_lock has already been locked at a higher level
+ *
+ * Perform an atomic adjustment of the PHC time by the specified number of
+ * nanoseconds. This requires a three-step process:
+ *
+ * 1) Write the adjustment to the source timer shadow registers
+ * 2) Write the adjustment to the PHY timer shadow registers
+ * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
+ *    both the source and port timers at the next clock cycle.
+ */
+enum ice_status ice_ptp_adj_clock(struct ice_hw *hw, s32 adj, bool lock_sbq)
+{
+	enum ice_status status;
+	u8 tmr_idx;
+
+	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+	/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
+	 * For an ADJ_TIME command, this set of registers represents the value
+	 * to add to the clock time. It supports subtraction by interpreting
+	 * the value as a 2's complement integer.
+	 */
+	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
+	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
+
+	if (ice_is_e810(hw))
+		status = ice_ptp_prep_phy_adj_e810(hw, adj, lock_sbq);
+	else
+		status = ice_ptp_prep_phy_adj_e822(hw, adj, lock_sbq);
+	if (status)
+		return status;
+
+	return ice_ptp_tmr_cmd(hw, ADJ_TIME, lock_sbq);
+}
+
+/**
+ * ice_ptp_adj_clock_at_time - Adjust PHC atomically at specified time
+ * @hw: pointer to HW struct
+ * @at_time: Time in nanoseconds at which to perform the adjustment
+ * @adj: Adjustment in nanoseconds
+ *
+ * Perform an atomic adjustment to the PHC clock at the specified time. This
+ * requires a five-step process:
+ *
+ * 1) Write the adjustment to the source timer shadow adjust registers
+ * 2) Write the target time to the source timer shadow time registers
+ * 3) Write the adjustment to the PHY timers shadow adjust registers
+ * 4) Write the target time to the PHY timers shadow adjust registers
+ * 5) Issue an ADJ_TIME_AT_TIME command to initiate the atomic adjustment.
+ */
+enum ice_status
+ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj)
+{
+	enum ice_status status;
+	u32 time_lo, time_hi;
+	u8 tmr_idx;
+
+	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+	time_lo = ICE_LO_DWORD(at_time);
+	time_hi = ICE_HI_DWORD(at_time);
+
+	/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
+	 * For an ADJ_TIME_AT_TIME command, this set of registers represents
+	 * the value to add to the clock time. It supports subtraction by
+	 * interpreting the value as a 2's complement integer.
+	 */
+	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
+	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
+
+	/* Write the target time to trigger the adjustment for source clock */
+	wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
+	wr32(hw, GLTSYN_SHTIME_L(tmr_idx), time_lo);
+	wr32(hw, GLTSYN_SHTIME_H(tmr_idx), time_hi);
+
+	/* Prepare PHY port adjustments */
+	if (ice_is_e810(hw))
+		status = ice_ptp_prep_phy_adj_e810(hw, adj, true);
+	else
+		status = ice_ptp_prep_phy_adj_e822(hw, adj, true);
+	if (status)
+		return status;
+
+	/* Set target time for each PHY port */
+	if (ice_is_e810(hw))
+		status = ice_ptp_prep_phy_adj_target_e810(hw, time_lo);
+	else
+		status = ice_ptp_prep_phy_adj_target_e822(hw, time_lo);
+	if (status)
+		return status;
+
+	return ice_ptp_tmr_cmd(hw, ADJ_TIME_AT_TIME, true);
+}
+
+/**
+ * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
+ * @hw: pointer to the HW struct
+ * @block: the block to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
+ * the block is the quad to read from. For E810 devices, the block is the
+ * logical port to read from.
+ */
+enum ice_status
+ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
+{
+	if (ice_is_e810(hw))
+		return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+	else
+		return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
+}
+
+/**
+ * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
+ * @hw: pointer to the HW struct
+ * @block: the block to read from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp, resetting its valid bit, from the timestamp block. For
+ * E822 devices, the block is the quad to clear from. For E810 devices, the
+ * block is the logical port to clear from.
+ */
+enum ice_status
+ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
+{
+	if (ice_is_e810(hw))
+		return ice_clear_phy_tstamp_e810(hw, block, idx);
+	else
+		return ice_clear_phy_tstamp_e822(hw, block, idx);
+}
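
For illustration (not part of the patch), a consumer draining a completed timestamp would typically read the slot and then clear it so the index can be reused; "block" and "idx" are assumed to come from the caller's own bookkeeping:

	/* Illustrative only: "block" is the quad (E822) or logical port (E810)
	 * and "idx" the timestamp slot to drain, both tracked by the caller.
	 */
	u64 raw_ts;

	status = ice_read_phy_tstamp(hw, block, idx, &raw_ts);
	if (status)
		return status;

	/* ... extend/deliver raw_ts to the application ... */

	status = ice_clear_phy_tstamp(hw, block, idx);
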
diff --git a/drivers/net/ice/base/ice_ptp_hw.h b/drivers/net/ice/base/ice_ptp_hw.h
new file mode 100644
index 0000000..8cbe817
--- /dev/null
+++ b/drivers/net/ice/base/ice_ptp_hw.h
@@ -0,0 +1,376 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _ICE_PTP_HW_H_
+#define _ICE_PTP_HW_H_
+
+enum ice_ptp_tmr_cmd {
+	INIT_TIME,
+	INIT_INCVAL,
+	ADJ_TIME,
+	ADJ_TIME_AT_TIME,
+	READ_TIME
+};
+
+enum ice_ptp_serdes {
+	ICE_PTP_SERDES_1G,
+	ICE_PTP_SERDES_10G,
+	ICE_PTP_SERDES_25G,
+	ICE_PTP_SERDES_40G,
+	ICE_PTP_SERDES_50G,
+	ICE_PTP_SERDES_100G
+};
+
+enum ice_ptp_link_spd {
+	ICE_PTP_LNK_SPD_1G,
+	ICE_PTP_LNK_SPD_10G,
+	ICE_PTP_LNK_SPD_25G,
+	ICE_PTP_LNK_SPD_25G_RS,
+	ICE_PTP_LNK_SPD_40G,
+	ICE_PTP_LNK_SPD_50G,
+	ICE_PTP_LNK_SPD_50G_RS,
+	ICE_PTP_LNK_SPD_100G_RS,
+	NUM_ICE_PTP_LNK_SPD /* Must be last */
+};
+
+enum ice_ptp_fec_mode {
+	ICE_PTP_FEC_MODE_NONE,
+	ICE_PTP_FEC_MODE_CLAUSE74,
+	ICE_PTP_FEC_MODE_RS_FEC
+};
+
+/**
+ * struct ice_time_ref_info_e822
+ * @pll_freq: Frequency of PLL that drives timer ticks in Hz
+ * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
+ * @pps_delay: propagation delay of the PPS output signal
+ *
+ * Characteristic information for the various TIME_REF sources possible in the
+ * E822 devices
+ */
+struct ice_time_ref_info_e822 {
+	u64 pll_freq;
+	u64 nominal_incval;
+	u8 pps_delay;
+};
+
+/* Table of constants related to possible TIME_REF sources */
+extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
+
+/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
+ * the E810 devices. Based on a PLL with an 812.5 MHz frequency.
+ */
+#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
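
A quick cross-check of this constant (editorial arithmetic, assuming the increment carries nanoseconds with 32 fractional bits):

/*
 * tick period = 1 / 812.5 MHz ~= 1.230769 ns
 * incval      = period * 2^32 = 2^32 / 0.8125 ~= 5286113595 = 0x13B13B13B
 */
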
+
+/* Device agnostic functions */
+u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
+u64 ice_ptp_read_src_incval(struct ice_hw *hw);
+bool ice_ptp_lock(struct ice_hw *hw);
+void ice_ptp_unlock(struct ice_hw *hw);
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd);
+enum ice_status ice_ptp_init_time(struct ice_hw *hw, u64 time);
+enum ice_status ice_ptp_write_incval(struct ice_hw *hw, u64 incval);
+enum ice_status ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
+enum ice_status ice_ptp_adj_clock(struct ice_hw *hw, s32 adj, bool lock_sbq);
+enum ice_status
+ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj);
+enum ice_status
+ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
+enum ice_status
+ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
+
+/* E822 family functions */
+enum ice_status
+ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val);
+enum ice_status
+ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val);
+enum ice_status
+ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
+enum ice_status
+ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
+enum ice_status
+ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time,
+			   bool lock_sbq);
+enum ice_status
+ice_ptp_read_phy_incval_e822(struct ice_hw *hw, u8 port, u64 *incval);
+enum ice_status
+ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts);
+enum ice_status
+ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd,
+		     bool lock_sbq);
+
+static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref)
+{
+	return e822_time_ref[time_ref].pll_freq;
+}
+
+static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref)
+{
+	return e822_time_ref[time_ref].nominal_incval;
+}
+
+static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref)
+{
+	return e822_time_ref[time_ref].pps_delay;
+}
+
+/* E822 Vernier calibration functions */
+enum ice_status ice_ptp_set_vernier_wl(struct ice_hw *hw);
+enum ice_status
+ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
+			       enum ice_ptp_link_spd *link_out,
+			       enum ice_ptp_fec_mode *fec_out);
+void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port);
+
+/* E810 family functions */
+enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw);
+
+#define PFTSYN_SEM_BYTES	4
+
+#define ICE_PTP_CLOCK_INDEX_0	0x00
+#define ICE_PTP_CLOCK_INDEX_1	0x01
+
+/* PHY timer commands */
+#define SEL_CPK_SRC	8
+#define SEL_PHY_SRC	3
+
+/* Time Sync command Definitions */
+#define GLTSYN_CMD_INIT_TIME		BIT(0)
+#define GLTSYN_CMD_INIT_INCVAL		BIT(1)
+#define GLTSYN_CMD_INIT_TIME_INCVAL	(BIT(0) | BIT(1))
+#define GLTSYN_CMD_ADJ_TIME		BIT(2)
+#define GLTSYN_CMD_ADJ_INIT_TIME	(BIT(2) | BIT(3))
+#define GLTSYN_CMD_READ_TIME		BIT(7)
+
+/* PHY port Time Sync command definitions */
+#define PHY_CMD_INIT_TIME		BIT(0)
+#define PHY_CMD_INIT_INCVAL		BIT(1)
+#define PHY_CMD_ADJ_TIME		(BIT(0) | BIT(1))
+#define PHY_CMD_ADJ_TIME_AT_TIME	(BIT(0) | BIT(2))
+#define PHY_CMD_READ_TIME		(BIT(0) | BIT(1) | BIT(2))
+
+#define TS_CMD_MASK_E810		0xFF
+#define TS_CMD_MASK			0xF
+#define SYNC_EXEC_CMD			0x3
+
+/* Macros to derive port low and high addresses on both quads */
+#define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF)
+#define P_Q0_H(a, p) ((((a) + (0x2000 * (p)))) >> 16)
+#define P_Q1_L(a, p) ((((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD)))) & 0xFFFF)
+#define P_Q1_H(a, p) ((((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD)))) >> 16)
+
+/* PHY QUAD register base addresses */
+#define Q_0_BASE			0x94000
+#define Q_1_BASE			0x114000
+
+/* Timestamp memory reset registers */
+#define Q_REG_TS_CTRL			0x618
+#define Q_REG_TS_CTRL_S			0
+#define Q_REG_TS_CTRL_M			BIT(0)
+
+/* Timestamp availability status registers */
+#define Q_REG_TX_MEMORY_STATUS_L	0xCF0
+#define Q_REG_TX_MEMORY_STATUS_U	0xCF4
+
+/* Tx FIFO status registers */
+#define Q_REG_FIFO23_STATUS		0xCF8
+#define Q_REG_FIFO01_STATUS		0xCFC
+#define Q_REG_FIFO02_S			0
+#define Q_REG_FIFO02_M			MAKEMASK(0x3FF, 0)
+#define Q_REG_FIFO13_S			10
+#define Q_REG_FIFO13_M			MAKEMASK(0x3FF, 10)
+
+/* Interrupt control Config registers */
+#define Q_REG_TX_MEM_GBL_CFG		0xC08
+#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S	0
+#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M	BIT(0)
+#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S	1
+#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M	MAKEMASK(0xFF, 1)
+#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S	9
+#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M MAKEMASK(0x3F, 9)
+#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S	15
+#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M	BIT(15)
+
+/* Tx Timestamp data registers */
+#define Q_REG_TX_MEMORY_BANK_START	0xA00
+
+/* PHY port register base addresses */
+#define P_0_BASE			0x80000
+#define P_4_BASE			0x106000
+
+/* Timestamp init registers */
+#define P_REG_RX_TIMER_INC_PRE_L	0x46C
+#define P_REG_RX_TIMER_INC_PRE_U	0x470
+#define P_REG_TX_TIMER_INC_PRE_L	0x44C
+#define P_REG_TX_TIMER_INC_PRE_U	0x450
+
+/* Timestamp match and adjust target registers */
+#define P_REG_RX_TIMER_CNT_ADJ_L	0x474
+#define P_REG_RX_TIMER_CNT_ADJ_U	0x478
+#define P_REG_TX_TIMER_CNT_ADJ_L	0x454
+#define P_REG_TX_TIMER_CNT_ADJ_U	0x458
+
+/* Timestamp capture registers */
+#define P_REG_RX_CAPTURE_L		0x4D8
+#define P_REG_RX_CAPTURE_U		0x4DC
+#define P_REG_TX_CAPTURE_L		0x4B4
+#define P_REG_TX_CAPTURE_U		0x4B8
+
+/* Timestamp PHY incval registers */
+#define P_REG_TIMETUS_L			0x410
+#define P_REG_TIMETUS_U			0x414
+
+#define P_REG_40B_LOW_M			0xFF
+#define P_REG_40B_HIGH_S		8
+
+/* PHY window length registers */
+#define P_REG_WL			0x40C
+
+#define PTP_VERNIER_WL			0x111ed
+
+/* PHY start registers */
+#define P_REG_PS			0x408
+#define P_REG_PS_START_S		0
+#define P_REG_PS_START_M		BIT(0)
+#define P_REG_PS_BYPASS_MODE_S		1
+#define P_REG_PS_BYPASS_MODE_M		BIT(1)
+#define P_REG_PS_ENA_CLK_S		2
+#define P_REG_PS_ENA_CLK_M		BIT(2)
+#define P_REG_PS_LOAD_OFFSET_S		3
+#define P_REG_PS_LOAD_OFFSET_M		BIT(3)
+#define P_REG_PS_SFT_RESET_S		11
+#define P_REG_PS_SFT_RESET_M		BIT(11)
+
+/* PHY offset valid registers */
+#define P_REG_TX_OV_STATUS		0x4D4
+#define P_REG_TX_OV_STATUS_OV_S		0
+#define P_REG_TX_OV_STATUS_OV_M		BIT(0)
+#define P_REG_RX_OV_STATUS		0x4F8
+#define P_REG_RX_OV_STATUS_OV_S		0
+#define P_REG_RX_OV_STATUS_OV_M		BIT(0)
+
+/* PHY offset ready registers */
+#define P_REG_TX_OR			0x45C
+#define P_REG_RX_OR			0x47C
+
+/* PHY total offset registers */
+#define P_REG_TOTAL_RX_OFFSET_L		0x460
+#define P_REG_TOTAL_RX_OFFSET_U		0x464
+#define P_REG_TOTAL_TX_OFFSET_L		0x440
+#define P_REG_TOTAL_TX_OFFSET_U		0x444
+
+/* Timestamp PAR/PCS registers */
+#define P_REG_UIX66_10G_40G_L		0x480
+#define P_REG_UIX66_10G_40G_U		0x484
+#define P_REG_UIX66_25G_100G_L		0x488
+#define P_REG_UIX66_25G_100G_U		0x48C
+#define P_REG_DESK_PAR_RX_TUS_L		0x490
+#define P_REG_DESK_PAR_RX_TUS_U		0x494
+#define P_REG_DESK_PAR_TX_TUS_L		0x498
+#define P_REG_DESK_PAR_TX_TUS_U		0x49C
+#define P_REG_DESK_PCS_RX_TUS_L		0x4A0
+#define P_REG_DESK_PCS_RX_TUS_U		0x4A4
+#define P_REG_DESK_PCS_TX_TUS_L		0x4A8
+#define P_REG_DESK_PCS_TX_TUS_U		0x4AC
+#define P_REG_PAR_RX_TUS_L		0x420
+#define P_REG_PAR_RX_TUS_U		0x424
+#define P_REG_PAR_TX_TUS_L		0x428
+#define P_REG_PAR_TX_TUS_U		0x42C
+#define P_REG_PCS_RX_TUS_L		0x430
+#define P_REG_PCS_RX_TUS_U		0x434
+#define P_REG_PCS_TX_TUS_L		0x438
+#define P_REG_PCS_TX_TUS_U		0x43C
+#define P_REG_PAR_RX_TIME_L		0x4F0
+#define P_REG_PAR_RX_TIME_U		0x4F4
+#define P_REG_PAR_TX_TIME_L		0x4CC
+#define P_REG_PAR_TX_TIME_U		0x4D0
+#define P_REG_PAR_PCS_RX_OFFSET_L	0x4E8
+#define P_REG_PAR_PCS_RX_OFFSET_U	0x4EC
+#define P_REG_PAR_PCS_TX_OFFSET_L	0x4C4
+#define P_REG_PAR_PCS_TX_OFFSET_U	0x4C8
+#define P_REG_LINK_SPEED		0x4FC
+#define P_REG_LINK_SPEED_SERDES_S	0
+#define P_REG_LINK_SPEED_SERDES_M	MAKEMASK(0x7, 0)
+#define P_REG_LINK_SPEED_FEC_MODE_S	3
+#define P_REG_LINK_SPEED_FEC_MODE_M	MAKEMASK(0x3, 3)
+#define P_REG_LINK_SPEED_FEC_MODE(reg)			\
+	(((reg) & P_REG_LINK_SPEED_FEC_MODE_M) >>	\
+	 P_REG_LINK_SPEED_FEC_MODE_S)
+
+/* PHY timestamp related registers */
+#define P_REG_PMD_ALIGNMENT		0x0FC
+#define P_REG_RX_80_TO_160_CNT		0x6FC
+#define P_REG_RX_80_TO_160_CNT_RXCYC_S	0
+#define P_REG_RX_80_TO_160_CNT_RXCYC_M	BIT(0)
+#define P_REG_RX_40_TO_160_CNT		0x8FC
+#define P_REG_RX_40_TO_160_CNT_RXCYC_S	0
+#define P_REG_RX_40_TO_160_CNT_RXCYC_M	MAKEMASK(0x3, 0)
+
+/* Rx FIFO status registers */
+#define P_REG_RX_OV_FS			0x4F8
+#define P_REG_RX_OV_FS_FIFO_STATUS_S	2
+#define P_REG_RX_OV_FS_FIFO_STATUS_M	MAKEMASK(0x3FF, 2)
+
+/* Timestamp command registers */
+#define P_REG_TX_TMR_CMD		0x448
+#define P_REG_RX_TMR_CMD		0x468
+
+/* E810 timesync enable register */
+#define ETH_GLTSYN_ENA(_i)		(0x03000348 + ((_i) * 4))
+
+/* E810 shadow init time registers */
+#define ETH_GLTSYN_SHTIME_0(i)		(0x03000368 + ((i) * 32))
+#define ETH_GLTSYN_SHTIME_L(i)		(0x0300036C + ((i) * 32))
+
+/* E810 shadow time adjust registers */
+#define ETH_GLTSYN_SHADJ_L(_i)		(0x03000378 + ((_i) * 32))
+#define ETH_GLTSYN_SHADJ_H(_i)		(0x0300037C + ((_i) * 32))
+
+/* E810 timer command register */
+#define ETH_GLTSYN_CMD			0x03000344
+
+/* Source timer incval macros */
+#define INCVAL_HIGH_M			0xFF
+
+/* Timestamp block macros */
+#define TS_LOW_M			0xFFFFFFFF
+#define TS_HIGH_M			0xFF
+#define TS_HIGH_S			32
+
+#define TS_PHY_LOW_M			0xFF
+#define TS_PHY_HIGH_M			0xFFFFFFFF
+#define TS_PHY_HIGH_S			8
+
+#define BYTES_PER_IDX_ADDR_L_U		8
+#define BYTES_PER_IDX_ADDR_L		4
+
+/* Internal PHY timestamp address */
+#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
+#define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U +		\
+			     BYTES_PER_IDX_ADDR_L))
+
+/* External PHY timestamp address */
+#define TS_EXT(a, port, idx) ((a) + (0x1000 * (port)) +			\
+				 ((idx) * BYTES_PER_IDX_ADDR_L_U))
+
+#define LOW_TX_MEMORY_BANK_START	0x03090000
+#define HIGH_TX_MEMORY_BANK_START	0x03090004
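
A worked address example (illustrative only): for logical port 1, timestamp index 2, the low half of the external timestamp lands at:

/*
 * TS_EXT(LOW_TX_MEMORY_BANK_START, 1, 2)
 *   = 0x03090000 + (0x1000 * 1) + (2 * BYTES_PER_IDX_ADDR_L_U)
 *   = 0x03090000 + 0x1000 + 0x10
 *   = 0x03091010
 */
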
+
+/* E810T PCA9575 IO controller registers */
+#define ICE_PCA9575_P0_IN	0x0
+#define ICE_PCA9575_P1_IN	0x1
+#define ICE_PCA9575_P0_CFG	0x8
+#define ICE_PCA9575_P1_CFG	0x9
+#define ICE_PCA9575_P0_OUT	0xA
+#define ICE_PCA9575_P1_OUT	0xB
+
+/* E810T PCA9575 IO controller pin control */
+#define ICE_E810T_P0_GNSS_PRSNT_N	BIT(4)
+#define ICE_E810T_P1_SMA1_DIR_EN	BIT(4)
+#define ICE_E810T_P1_SMA1_TX_EN		BIT(5)
+#define ICE_E810T_P1_SMA2_UFL2_RX_DIS	BIT(3)
+#define ICE_E810T_P1_SMA2_DIR_EN	BIT(6)
+#define ICE_E810T_P1_SMA2_TX_EN		BIT(7)
+
+#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index 2d21c21..2bc4b61 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -55,6 +55,7 @@
 #include "ice_lan_tx_rx.h"
 #include "ice_flex_type.h"
 #include "ice_protocol_type.h"
+#include "ice_sbq_cmd.h"
 #include "ice_vlan_mode.h"
 
 /**
@@ -131,6 +132,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
 #define ICE_DBG_PKG		BIT_ULL(16)
 #define ICE_DBG_RES		BIT_ULL(17)
 #define ICE_DBG_ACL		BIT_ULL(18)
+#define ICE_DBG_PTP		BIT_ULL(19)
 #define ICE_DBG_AQ_MSG		BIT_ULL(24)
 #define ICE_DBG_AQ_DESC		BIT_ULL(25)
 #define ICE_DBG_AQ_DESC_BUF	BIT_ULL(26)
@@ -1012,6 +1014,7 @@ struct ice_hw {
 
 	/* Control Queue info */
 	struct ice_ctl_q_info adminq;
+	struct ice_ctl_q_info sbq;
 	struct ice_ctl_q_info mailboxq;
 	/* Additional function to send AdminQ command */
 	int (*aq_send_cmd_fn)(void *param, struct ice_aq_desc *desc,
diff --git a/drivers/net/ice/base/meson.build b/drivers/net/ice/base/meson.build
index 3305e5d..a5db1a5 100644
--- a/drivers/net/ice/base/meson.build
+++ b/drivers/net/ice/base/meson.build
@@ -14,6 +14,7 @@ sources = [
         'ice_acl.c',
         'ice_acl_ctrl.c',
         'ice_vlan_mode.c',
+        'ice_ptp_hw.c',
 ]
 
 error_cflags = [
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [dpdk-dev] [PATCH 3/4] net/ice/base: add clock initialization function
  2021-08-06  1:34 [dpdk-dev] [PATCH 0/4] net/ice: support IEEE 1588 Simei Su
  2021-08-06  1:34 ` [dpdk-dev] [PATCH 1/4] net/ice/base: add 1588 capability probe Simei Su
  2021-08-06  1:34 ` [dpdk-dev] [PATCH 2/4] net/ice/base: add low level functions for device clock control Simei Su
@ 2021-08-06  1:34 ` Simei Su
  2021-08-06  1:34 ` [dpdk-dev] [PATCH 4/4] net/ice: support IEEE 1588 PTP Simei Su
  2021-09-02  1:37 ` [dpdk-dev] [PATCH v2] net/ice: support IEEE 1588 PTP for E810 Simei Su
  4 siblings, 0 replies; 13+ messages in thread
From: Simei Su @ 2021-08-06  1:34 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, haiyue.wang, Jacob Keller

From: Qi Zhang <qi.z.zhang@intel.com>

Before the device PTP hardware clock can be initialized, some steps must
be taken by the driver. This includes writing some registers and
initializing the PHY.

Some of these steps are distinct depending on the device type (E810 or
E822). Additionally, a future change will introduce more steps for E822
devices to program the Clock Generation Unit.

Introduce ice_ptp_init_phc as well as device-specific sub-functions for
E810 and E822 devices.

Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/ice/base/ice_cgu_regs.h   | 117 ++++++++++++
 drivers/net/ice/base/ice_ptp_consts.h |  74 ++++++++
 drivers/net/ice/base/ice_ptp_hw.c     | 348 +++++++++++++++++++++++++++++++++-
 drivers/net/ice/base/ice_ptp_hw.h     |  24 +++
 4 files changed, 562 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/base/ice_cgu_regs.h

diff --git a/drivers/net/ice/base/ice_cgu_regs.h b/drivers/net/ice/base/ice_cgu_regs.h
new file mode 100644
index 0000000..6751481
--- /dev/null
+++ b/drivers/net/ice/base/ice_cgu_regs.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _ICE_CGU_REGS_H_
+#define _ICE_CGU_REGS_H_
+
+#define NAC_CGU_DWORD9 0x24
+union nac_cgu_dword9 {
+	struct {
+		u32 time_ref_freq_sel : 3;
+		u32 clk_eref1_en : 1;
+		u32 clk_eref0_en : 1;
+		u32 time_ref_en : 1;
+		u32 time_sync_en : 1;
+		u32 one_pps_out_en : 1;
+		u32 clk_ref_synce_en : 1;
+		u32 clk_synce1_en : 1;
+		u32 clk_synce0_en : 1;
+		u32 net_clk_ref1_en : 1;
+		u32 net_clk_ref0_en : 1;
+		u32 clk_synce1_amp : 2;
+		u32 misc6 : 1;
+		u32 clk_synce0_amp : 2;
+		u32 one_pps_out_amp : 2;
+		u32 misc24 : 12;
+	} field;
+	u32 val;
+};
+
+#define NAC_CGU_DWORD19 0x4c
+union nac_cgu_dword19 {
+	struct {
+		u32 tspll_fbdiv_intgr : 8;
+		u32 fdpll_ulck_thr : 5;
+		u32 misc15 : 3;
+		u32 tspll_ndivratio : 4;
+		u32 tspll_iref_ndivratio : 3;
+		u32 misc19 : 1;
+		u32 japll_ndivratio : 4;
+		u32 japll_iref_ndivratio : 3;
+		u32 misc27 : 1;
+	} field;
+	u32 val;
+};
+
+#define NAC_CGU_DWORD22 0x58
+union nac_cgu_dword22 {
+	struct {
+		u32 fdpll_frac_div_out_nc : 2;
+		u32 fdpll_lock_int_for : 1;
+		u32 synce_hdov_int_for : 1;
+		u32 synce_lock_int_for : 1;
+		u32 fdpll_phlead_slip_nc : 1;
+		u32 fdpll_acc1_ovfl_nc : 1;
+		u32 fdpll_acc2_ovfl_nc : 1;
+		u32 synce_status_nc : 6;
+		u32 fdpll_acc1f_ovfl : 1;
+		u32 misc18 : 1;
+		u32 fdpllclk_div : 4;
+		u32 time1588clk_div : 4;
+		u32 synceclk_div : 4;
+		u32 synceclk_sel_div2 : 1;
+		u32 fdpllclk_sel_div2 : 1;
+		u32 time1588clk_sel_div2 : 1;
+		u32 misc3 : 1;
+	} field;
+	u32 val;
+};
+
+#define NAC_CGU_DWORD24 0x60
+union nac_cgu_dword24 {
+	struct {
+		u32 tspll_fbdiv_frac : 22;
+		u32 misc20 : 2;
+		u32 ts_pll_enable : 1;
+		u32 time_sync_tspll_align_sel : 1;
+		u32 ext_synce_sel : 1;
+		u32 ref1588_ck_div : 4;
+		u32 time_ref_sel : 1;
+	} field;
+	u32 val;
+};
+
+#define TSPLL_CNTR_BIST_SETTINGS 0x344
+union tspll_cntr_bist_settings {
+	struct {
+		u32 i_irefgen_settling_time_cntr_7_0 : 8;
+		u32 i_irefgen_settling_time_ro_standby_1_0 : 2;
+		u32 reserved195 : 5;
+		u32 i_plllock_sel_0 : 1;
+		u32 i_plllock_sel_1 : 1;
+		u32 i_plllock_cnt_6_0 : 7;
+		u32 i_plllock_cnt_10_7 : 4;
+		u32 reserved200 : 4;
+	} field;
+	u32 val;
+};
+
+#define TSPLL_RO_BWM_LF 0x370
+union tspll_ro_bwm_lf {
+	struct {
+		u32 bw_freqov_high_cri_7_0 : 8;
+		u32 bw_freqov_high_cri_9_8 : 2;
+		u32 biascaldone_cri : 1;
+		u32 plllock_gain_tran_cri : 1;
+		u32 plllock_true_lock_cri : 1;
+		u32 pllunlock_flag_cri : 1;
+		u32 afcerr_cri : 1;
+		u32 afcdone_cri : 1;
+		u32 feedfwrdgain_cal_cri_7_0 : 8;
+		u32 m2fbdivmod_cri_7_0 : 8;
+	} field;
+	u32 val;
+};
+
+#endif /* _ICE_CGU_REGS_H_ */
diff --git a/drivers/net/ice/base/ice_ptp_consts.h b/drivers/net/ice/base/ice_ptp_consts.h
index 2bd338c..4583dd4 100644
--- a/drivers/net/ice/base/ice_ptp_consts.h
+++ b/drivers/net/ice/base/ice_ptp_consts.h
@@ -83,4 +83,78 @@ const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
 	},
 };
 
+const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+	/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+	{
+		/* refclk_pre_div */
+		1,
+		/* feedback_div */
+		197,
+		/* frac_n_div */
+		2621440,
+		/* post_pll_div */
+		6,
+	},
+
+	/* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+	{
+		/* refclk_pre_div */
+		5,
+		/* feedback_div */
+		223,
+		/* frac_n_div */
+		524288,
+		/* post_pll_div */
+		7,
+	},
+
+	/* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+	{
+		/* refclk_pre_div */
+		5,
+		/* feedback_div */
+		223,
+		/* frac_n_div */
+		524288,
+		/* post_pll_div */
+		7,
+	},
+
+	/* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+	{
+		/* refclk_pre_div */
+		5,
+		/* feedback_div */
+		159,
+		/* frac_n_div */
+		1572864,
+		/* post_pll_div */
+		6,
+	},
+
+	/* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+	{
+		/* refclk_pre_div */
+		5,
+		/* feedback_div */
+		159,
+		/* frac_n_div */
+		1572864,
+		/* post_pll_div */
+		6,
+	},
+
+	/* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+	{
+		/* refclk_pre_div */
+		10,
+		/* feedback_div */
+		223,
+		/* frac_n_div */
+		524288,
+		/* post_pll_div */
+		7,
+	},
+};
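
As an editorial cross-check, and assuming the usual fractional-N relationship for the 22-bit tspll_fbdiv_frac field together with the ndivratio of 1 that ice_cfg_cgu_pll_e822() programs, the 25 MHz entry reproduces the expected TS PLL rate:

/*
 * f_pll = 25 MHz / 1 (refclk_pre_div)
 *       * (197 + 2621440 / 2^22)   (integer + fractional feedback divisor)
 *       / 6 (post_pll_div)
 *       = 25 MHz * 197.625 / 6 ~= 823.4375 MHz
 *
 * which should line up with the pll_freq listed for this TIME_REF in
 * e822_time_ref[] above.
 */
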
+
 #endif /* _ICE_PTP_CONSTS_H_ */
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 8aefcf9..cb32a4f 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -6,7 +6,7 @@
 #include "ice_common.h"
 #include "ice_ptp_hw.h"
 #include "ice_ptp_consts.h"
-
+#include "ice_cgu_regs.h"
 
 /* Low level functions for interacting with and managing the device clock used
  * for the Precision Time Protocol.
@@ -700,6 +700,315 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
 }
 
 /**
+ * ice_read_cgu_reg_e822 - Read a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to read
+ * @val: storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static enum ice_status
+ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
+{
+	struct ice_sbq_msg_input cgu_msg;
+	enum ice_status status;
+
+	cgu_msg.opcode = ice_sbq_msg_rd;
+	cgu_msg.dest_dev = cgu;
+	cgu_msg.msg_addr_low = addr;
+	cgu_msg.msg_addr_high = 0x0;
+
+	status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, status %d\n",
+			  addr, status);
+		return status;
+	}
+
+	*val = cgu_msg.data;
+
+	return status;
+}
+
+/**
+ * ice_write_cgu_reg_e822 - Write a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to write
+ * @val: value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static enum ice_status
+ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
+{
+	struct ice_sbq_msg_input cgu_msg;
+	enum ice_status status;
+
+	cgu_msg.opcode = ice_sbq_msg_wr;
+	cgu_msg.dest_dev = cgu;
+	cgu_msg.msg_addr_low = addr;
+	cgu_msg.msg_addr_high = 0x0;
+	cgu_msg.data = val;
+
+	status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, status %d\n",
+			  addr, status);
+		return status;
+	}
+
+	return status;
+}
+
+/**
+ * ice_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Convert the specified TIME_REF clock frequency to a string.
+ */
+static const char *ice_clk_freq_str(u8 clk_freq)
+{
+	switch ((enum ice_time_ref_freq)clk_freq) {
+	case ICE_TIME_REF_FREQ_25_000:
+		return "25 MHz";
+	case ICE_TIME_REF_FREQ_122_880:
+		return "122.88 MHz";
+	case ICE_TIME_REF_FREQ_125_000:
+		return "125 MHz";
+	case ICE_TIME_REF_FREQ_153_600:
+		return "153.6 MHz";
+	case ICE_TIME_REF_FREQ_156_250:
+		return "156.25 MHz";
+	case ICE_TIME_REF_FREQ_245_760:
+		return "245.76 MHz";
+	default:
+		return "Unknown";
+	}
+}
+
+/**
+ * ice_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Convert the specified clock source to its string name.
+ */
+static const char *ice_clk_src_str(u8 clk_src)
+{
+	switch ((enum ice_clk_src)clk_src) {
+	case ICE_CLK_SRC_TCX0:
+		return "TCX0";
+	case ICE_CLK_SRC_TIME_REF:
+		return "TIME_REF";
+	default:
+		return "Unknown";
+	}
+}
+
+/**
+ * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCX0)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ */
+enum ice_status
+ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
+		     enum ice_clk_src clk_src)
+{
+	union tspll_ro_bwm_lf bwm_lf;
+	union nac_cgu_dword19 dw19;
+	union nac_cgu_dword22 dw22;
+	union nac_cgu_dword24 dw24;
+	union nac_cgu_dword9 dw9;
+	enum ice_status status;
+
+	if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+		ice_warn(hw, "Invalid TIME_REF frequency %u\n", clk_freq);
+		return ICE_ERR_PARAM;
+	}
+
+	if (clk_src >= NUM_ICE_CLK_SRC) {
+		ice_warn(hw, "Invalid clock source %u\n", clk_src);
+		return ICE_ERR_PARAM;
+	}
+
+	if (clk_src == ICE_CLK_SRC_TCX0 &&
+	    clk_freq != ICE_TIME_REF_FREQ_25_000) {
+		ice_warn(hw, "TCX0 only supports 25 MHz frequency\n");
+		return ICE_ERR_PARAM;
+	}
+
+	status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
+	if (status)
+		return status;
+
+	status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+	if (status)
+		return status;
+
+	status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+	if (status)
+		return status;
+
+	/* Log the current clock configuration */
+	ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
+		  ice_clk_src_str(dw24.field.time_ref_sel),
+		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+	/* Disable the PLL before changing the clock source or frequency */
+	if (dw24.field.ts_pll_enable) {
+		dw24.field.ts_pll_enable = 0;
+
+		status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+		if (status)
+			return status;
+	}
+
+	/* Set the frequency */
+	dw9.field.time_ref_freq_sel = clk_freq;
+	status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
+	if (status)
+		return status;
+
+	/* Configure the TS PLL feedback divisor */
+	status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
+	if (status)
+		return status;
+
+	dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
+	dw19.field.tspll_ndivratio = 1;
+
+	status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
+	if (status)
+		return status;
+
+	/* Configure the TS PLL post divisor */
+	status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
+	if (status)
+		return status;
+
+	dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
+	dw22.field.time1588clk_sel_div2 = 0;
+
+	status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
+	if (status)
+		return status;
+
+	/* Configure the TS PLL pre divisor and clock source */
+	status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+	if (status)
+		return status;
+
+	dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
+	dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
+	dw24.field.time_ref_sel = clk_src;
+
+	status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+	if (status)
+		return status;
+
+	/* Finally, enable the PLL */
+	dw24.field.ts_pll_enable = 1;
+
+	status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+	if (status)
+		return status;
+
+	/* Wait for the PLL to lock, then check the lock status */
+	ice_msec_delay(1, true);
+
+	status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+	if (status)
+		return status;
+
+	if (!bwm_lf.field.plllock_true_lock_cri) {
+		ice_warn(hw, "CGU PLL failed to lock\n");
+		return ICE_ERR_NOT_READY;
+	}
+
+	/* Log the current clock configuration */
+	ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
+		  ice_clk_src_str(dw24.field.time_ref_sel),
+		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_init_cgu_e822 - Initialize CGU with settings from firmware
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E822 device.
+ */
+static enum ice_status ice_init_cgu_e822(struct ice_hw *hw)
+{
+	struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+	union tspll_cntr_bist_settings cntr_bist;
+	enum ice_status status;
+
+	status = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+				       &cntr_bist.val);
+	if (status)
+		return status;
+
+	/* Disable sticky lock detection so lock status reported is accurate */
+	cntr_bist.field.i_plllock_sel_0 = 0;
+	cntr_bist.field.i_plllock_sel_1 = 0;
+
+	status = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+					cntr_bist.val);
+	if (status)
+		return status;
+
+	/* Configure the CGU PLL using the parameters from the function
+	 * capabilities.
+	 */
+	status = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
+				      (enum ice_clk_src)ts_info->clk_src);
+	if (status)
+		return status;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform PHC initialization steps specific to E822 devices.
+ */
+static enum ice_status ice_ptp_init_phc_e822(struct ice_hw *hw)
+{
+	enum ice_status status;
+	u32 regval;
+
+	/* Enable reading switch and PHY registers over the sideband queue */
+#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
+#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
+	regval = rd32(hw, PF_SB_REM_DEV_CTL);
+	regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
+		   PF_SB_REM_DEV_CTL_PHY0);
+	wr32(hw, PF_SB_REM_DEV_CTL, regval);
+
+	/* Initialize the Clock Generation Unit */
+	status = ice_init_cgu_e822(hw);
+	if (status)
+		return status;
+
+	/* Set window length for all the ports */
+	return ice_ptp_set_vernier_wl(hw);
+}
+
+/**
  * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
  * @hw: pointer to the HW struct
  * @time: Time to initialize the PHY port clocks to
@@ -1446,6 +1755,21 @@ enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw)
 }
 
 /**
+ * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E810-specific PTP hardware clock initialization steps.
+ */
+static enum ice_status ice_ptp_init_phc_e810(struct ice_hw *hw)
+{
+	/* Ensure synchronization delay is zero */
+	wr32(hw, GLTSYN_SYNC_DLAY, 0);
+
+	/* Initialize the PHY */
+	return ice_ptp_init_phy_e810(hw);
+}
+
+/**
  * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
  * @hw: Board private structure
  * @time: Time to initialize the PHY port clock to
@@ -2021,3 +2345,25 @@ ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
 	else
 		return ice_clear_phy_tstamp_e822(hw, block, idx);
 }
+
+/**
+ * ice_ptp_init_phc - Initialize PTP hardware clock
+ * @hw: pointer to the HW struct
+ *
+ * Perform the steps required to initialize the PTP hardware clock.
+ */
+enum ice_status ice_ptp_init_phc(struct ice_hw *hw)
+{
+	u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+	/* Enable source clocks */
+	wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
+
+	/* Clear event status indications for auxiliary pins */
+	(void)rd32(hw, GLTSYN_STAT(src_idx));
+
+	if (ice_is_e810(hw))
+		return ice_ptp_init_phc_e810(hw);
+	else
+		return ice_ptp_init_phc_e822(hw);
+}
diff --git a/drivers/net/ice/base/ice_ptp_hw.h b/drivers/net/ice/base/ice_ptp_hw.h
index 8cbe817..eb0e410 100644
--- a/drivers/net/ice/base/ice_ptp_hw.h
+++ b/drivers/net/ice/base/ice_ptp_hw.h
@@ -55,6 +55,26 @@ struct ice_time_ref_info_e822 {
 	u8 pps_delay;
 };
 
+/**
+ * struct ice_cgu_pll_params_e822
+ * @refclk_pre_div: Reference clock pre-divisor
+ * @feedback_div: Feedback divisor
+ * @frac_n_div: Fractional divisor
+ * @post_pll_div: Post PLL divisor
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF frequency.
+ */
+struct ice_cgu_pll_params_e822 {
+	u32 refclk_pre_div;
+	u32 feedback_div;
+	u32 frac_n_div;
+	u32 post_pll_div;
+};
+
+extern const struct
+ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+
 /* Table of constants related to possible TIME_REF sources */
 extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
 
@@ -79,6 +99,7 @@ enum ice_status
 ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
 enum ice_status
 ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
+enum ice_status ice_ptp_init_phc(struct ice_hw *hw);
 
 /* E822 family functions */
 enum ice_status
@@ -99,6 +120,9 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts);
 enum ice_status
 ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd,
 		     bool lock_sbq);
+enum ice_status
+ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
+		     enum ice_clk_src clk_src);
 
 static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref)
 {
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [dpdk-dev] [PATCH 4/4] net/ice: support IEEE 1588 PTP
  2021-08-06  1:34 [dpdk-dev] [PATCH 0/4] net/ice: support IEEE 1588 Simei Su
                   ` (2 preceding siblings ...)
  2021-08-06  1:34 ` [dpdk-dev] [PATCH 3/4] net/ice/base: add clock initialization function Simei Su
@ 2021-08-06  1:34 ` Simei Su
  2021-09-02  1:37 ` [dpdk-dev] [PATCH v2] net/ice: support IEEE 1588 PTP for E810 Simei Su
  4 siblings, 0 replies; 13+ messages in thread
From: Simei Su @ 2021-08-06  1:34 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, haiyue.wang, Simei Su

Add ice support for new ethdev APIs to enable and read IEEE1588
PTP timestamps. Currently, only the normal (scalar) path supports
1588 PTP; the vector path doesn't.

The example command for running ptpclient is as below:
./build/examples/dpdk-ptpclient -c 1 -n 3 --force-max-simd-bitwidth=64 --
-T 0 -p 0x1
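
Not part of the patch, but as a reference: a minimal sketch of how an
application could consume the new Rx timestamp path, under the assumption
that EAL init, port setup with DEV_RX_OFFLOAD_TIMESTAMP, rte_eth_dev_start()
and rte_eth_timesync_enable() were already done (check_ptp_rx() is an
illustrative name):

#include <stdio.h>
#include <time.h>

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative sketch only, not part of this patch: poll one Rx queue and
 * read back the hardware timestamp of any received PTP frame.
 */
static void
check_ptp_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	struct timespec ts;
	uint16_t nb_rx, i;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < nb_rx; i++) {
		if (pkts[i]->ol_flags & PKT_RX_IEEE1588_PTP) {
			/* The PMD records the Rx queue index in
			 * mbuf->timesync and expects it back via 'flags'.
			 */
			if (rte_eth_timesync_read_rx_timestamp(port_id, &ts,
					pkts[i]->timesync) == 0)
				printf("Rx timestamp: %ld.%09ld\n",
				       (long)ts.tv_sec, (long)ts.tv_nsec);
		}
		rte_pktmbuf_free(pkts[i]);
	}
}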

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/ice/ice_ethdev.c | 226 ++++++++++++++++++++++++++++++++++++++++++-
 drivers/net/ice/ice_ethdev.h |   5 +
 drivers/net/ice/ice_rxtx.c   |  42 +++++++-
 drivers/net/ice/ice_rxtx.h   |   1 +
 4 files changed, 272 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 5fd5f99..1e76628 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -16,6 +16,7 @@
 #include "base/ice_flow.h"
 #include "base/ice_dcb.h"
 #include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
 
 #include "rte_pmd_ice.h"
 #include "ice_ethdev.h"
@@ -27,6 +28,8 @@
 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
 #define ICE_PROTO_XTR_ARG         "proto_xtr"
 
+#define ICE_CYCLECOUNTER_MASK     0xffffffffffffffffULL
+
 static const char * const ice_valid_args[] = {
 	ICE_SAFE_MODE_SUPPORT_ARG,
 	ICE_PIPELINE_MODE_SUPPORT_ARG,
@@ -137,6 +140,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp,
+					  uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+				  struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+				   const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
@@ -220,6 +235,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 	.tx_done_cleanup              = ice_tx_done_cleanup,
 	.get_monitor_addr             = ice_get_monitor_addr,
+	.timesync_enable              = ice_timesync_enable,
+	.timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
+	.timesync_adjust_time         = ice_timesync_adjust_time,
+	.timesync_read_time           = ice_timesync_read_time,
+	.timesync_write_time          = ice_timesync_write_time,
+	.timesync_disable             = ice_timesync_disable,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3442,7 +3464,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			DEV_RX_OFFLOAD_QINQ_STRIP |
 			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
 			DEV_RX_OFFLOAD_VLAN_EXTEND |
-			DEV_RX_OFFLOAD_RSS_HASH;
+			DEV_RX_OFFLOAD_RSS_HASH |
+			DEV_RX_OFFLOAD_TIMESTAMP;
 		dev_info->tx_offload_capa |=
 			DEV_TX_OFFLOAD_QINQ_INSERT |
 			DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -5254,6 +5277,207 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 }
 
 static int
+ice_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int ret;
+
+	if (hw->func_caps.ts_func_info.src_tmr_owned) {
+		ret = ice_ptp_init_phc(hw);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to initialize PHC\n");
+			return -1;
+		}
+
+		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to write PHC increment time value\n");
+			return -1;
+		}
+	}
+
+	/* Initialize cycle counters for system time/RX/TX timestamp */
+	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->systime_tc.cc_shift = 0;
+	ad->systime_tc.nsec_mask = 0;
+
+	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->rx_tstamp_tc.cc_shift = 0;
+	ad->rx_tstamp_tc.nsec_mask = 0;
+
+	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->tx_tstamp_tc.cc_shift = 0;
+	ad->tx_tstamp_tc.nsec_mask = 0;
+
+	return 0;
+}
+
+static uint64_t
+ice_read_time(struct ice_hw *hw)
+{
+	uint32_t hi, lo, lo2;
+	uint64_t time;
+
+	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+
+	if (lo2 < lo) {
+		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	}
+
+	time = ((uint64_t)hi << 32) | lo;
+
+	return time;
+}
+
+static uint64_t
+ice_tstamp_convert_32b_64b(uint64_t time, uint64_t timestamp)
+{
+	const uint64_t mask = 0xFFFFFFFF;
+	uint32_t delta;
+	uint64_t ns;
+
+	delta = (timestamp - (uint32_t)(time & mask));
+
+	if (delta > (mask / 2)) {
+		delta = ((uint32_t)(time & mask) - timestamp);
+		ns = time - delta;
+	} else {
+		ns = time + delta;
+	}
+
+	return ns;
+}
+
+static int
+ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp, uint32_t flags)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct ice_rx_queue *rxq;
+	uint32_t ts_high;
+	uint64_t time, ts_ns, ns;
+
+	rxq = dev->data->rx_queues[flags];
+
+	time = ice_read_time(hw);
+
+	ts_high = rxq->time_high;
+	ts_ns = ice_tstamp_convert_32b_64b(time, ts_high);
+	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint8_t lport;
+	uint64_t time, ts_ns, ns, tstamp;
+	const uint64_t mask = 0xFFFFFFFF;
+	int ret;
+
+	lport = hw->port_info->lport;
+
+	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to read phy timestamp\n");
+		return -1;
+	}
+
+	time = ice_read_time(hw);
+
+	ts_ns = ice_tstamp_convert_32b_64b(time, (tstamp >> 8) & mask);
+	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	ad->systime_tc.nsec += delta;
+	ad->rx_tstamp_tc.nsec += delta;
+	ad->tx_tstamp_tc.nsec += delta;
+
+	return 0;
+}
+
+static int
+ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t ns;
+
+	ns = rte_timespec_to_ns(ts);
+
+	ad->systime_tc.nsec = ns;
+	ad->rx_tstamp_tc.nsec = ns;
+	ad->tx_tstamp_tc.nsec = ns;
+
+	return 0;
+}
+
+static int
+ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t time, ns;
+
+	time = ice_read_time(hw);
+	ns = rte_timecounter_update(&ad->systime_tc, time);
+	*ts = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint64_t val;
+	uint8_t lport;
+
+	lport = hw->port_info->lport;
+
+	dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+
+	ice_clear_phy_tstamp(hw, lport, 0);
+
+	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
+	val &= ~GLTSYN_ENA_TSYN_ENA_M;
+	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
+
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
+
+	return 0;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 2a8a816..f71af40 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -6,6 +6,7 @@
 #define _ICE_ETHDEV_H_
 
 #include <rte_kvargs.h>
+#include <rte_time.h>
 
 #include <ethdev_driver.h>
 
@@ -487,6 +488,10 @@ struct ice_adapter {
 	struct ice_devargs devargs;
 	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
 	uint16_t fdir_ref_cnt;
+	/* For PTP */
+	struct rte_timecounter systime_tc;
+	struct rte_timecounter rx_tstamp_tc;
+	struct rte_timecounter tx_tstamp_tc;
 };
 
 struct ice_vsi_vlan_pvid_info {
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 49abcb2..606a4e2 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -346,6 +346,11 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
+	/* Enable timestamp bit in the queue context */
+	if (rxmode->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+		regval |= (0x1 << QRXFLXP_CNTXT_TS_S) &
+			QRXFLXP_CNTXT_TS_M;
+
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
 
 	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
@@ -681,6 +686,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	tx_ctx.tso_ena = 1; /* tso enable */
 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+	tx_ctx.tsyn_ena = 1;
 
 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
@@ -1530,6 +1536,7 @@ static inline int
 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 {
 	volatile union ice_rx_flex_desc *rxdp;
+	struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
 	struct ice_rx_entry *rxep;
 	struct rte_mbuf *mb;
 	uint16_t stat_err0;
@@ -1581,6 +1588,14 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 
+			if (dev_data->dev_conf.rxmode.offloads &
+			    DEV_RX_OFFLOAD_TIMESTAMP) {
+				rxq->time_high = rte_le_to_cpu_32(
+						rxdp[j].wb.flex_ts.ts_high);
+				mb->timesync = rxq->queue_id;
+				pkt_flags |= PKT_RX_IEEE1588_PTP;
+			}
+
 			mb->ol_flags |= pkt_flags;
 		}
 
@@ -1749,6 +1764,7 @@ ice_recv_scattered_pkts(void *rx_queue,
 			uint16_t nb_pkts)
 {
 	struct ice_rx_queue *rxq = rx_queue;
+	struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
 	volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
 	volatile union ice_rx_flex_desc *rxdp;
 	union ice_rx_flex_desc rxd;
@@ -1878,6 +1894,14 @@ ice_recv_scattered_pkts(void *rx_queue,
 		ice_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		if (dev_data->dev_conf.rxmode.offloads &
+		    DEV_RX_OFFLOAD_TIMESTAMP) {
+			rxq->time_high = rxd.wb.flex_ts.ts_high;
+			first_seg->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		first_seg->ol_flags |= pkt_flags;
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -2216,6 +2240,7 @@ ice_recv_pkts(void *rx_queue,
 	      uint16_t nb_pkts)
 {
 	struct ice_rx_queue *rxq = rx_queue;
+	struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
 	volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
 	volatile union ice_rx_flex_desc *rxdp;
 	union ice_rx_flex_desc rxd;
@@ -2284,6 +2309,14 @@ ice_recv_pkts(void *rx_queue,
 		ice_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		if (dev_data->dev_conf.rxmode.offloads &
+		    DEV_RX_OFFLOAD_TIMESTAMP) {
+			rxq->time_high = rxd.wb.flex_ts.ts_high;
+			rxm->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		rxm->ol_flags |= pkt_flags;
 		/* copy old mbuf to rx_pkts */
 		rx_pkts[nb_rx++] = rxm;
@@ -2495,7 +2528,8 @@ ice_calc_context_desc(uint64_t flags)
 	static uint64_t mask = PKT_TX_TCP_SEG |
 		PKT_TX_QINQ |
 		PKT_TX_OUTER_IP_CKSUM |
-		PKT_TX_TUNNEL_MASK;
+		PKT_TX_TUNNEL_MASK |
+		PKT_TX_IEEE1588_TMST;
 
 	return (flags & mask) ? 1 : 0;
 }
@@ -2663,6 +2697,12 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			if (ol_flags & PKT_TX_TCP_SEG)
 				cd_type_cmd_tso_mss |=
 					ice_set_tso_ctx(tx_pkt, tx_offload);
+			else {
+				if (ol_flags & PKT_TX_IEEE1588_TMST)
+					cd_type_cmd_tso_mss |=
+					   ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+					   ICE_TXD_CTX_QW1_CMD_S);
+			}
 
 			ctx_txd->tunneling_params =
 				rte_cpu_to_le_32(cd_tunneling_params);
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index b29387c..b544eb2 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,6 +89,7 @@ struct ice_rx_queue {
 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
 	uint64_t offloads;
+	uint32_t time_high; /* High value of the timestamp */
 };
 
 struct ice_tx_entry {
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [dpdk-dev] [PATCH v2] net/ice: support IEEE 1588 PTP for E810
  2021-08-06  1:34 [dpdk-dev] [PATCH 0/4] net/ice: support IEEE 1588 Simei Su
                   ` (3 preceding siblings ...)
  2021-08-06  1:34 ` [dpdk-dev] [PATCH 4/4] net/ice: support IEEE 1588 PTP Simei Su
@ 2021-09-02  1:37 ` Simei Su
  2021-09-09  1:30   ` [dpdk-dev] [PATCH v3] " Simei Su
  4 siblings, 1 reply; 13+ messages in thread
From: Simei Su @ 2021-09-02  1:37 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, haiyue.wang, Simei Su

Add ice support for new ethdev APIs to enable/disable and read/write/adjust
IEEE1588 PTP timestamps. Currently, only the scalar path supports 1588 PTP;
the vector path doesn't.

The example command for running ptpclient is as below:
./dpdk-ptpclient -c 1 -n 3 --force-max-simd-bitwidth=64 -- -T 0 -p 0x1
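
Not part of the patch, but for reference, a rough sketch of the Tx side of
the new API, assuming the port is started and timesync is already enabled
(send_ptp_frame() is an illustrative name and the retry budget is arbitrary):

#include <stdio.h>
#include <time.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative sketch only: request a hardware Tx timestamp for one PTP
 * frame and poll the new read_tx_timestamp callback until it is latched.
 */
static int
send_ptp_frame(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *m)
{
	struct timespec ts;
	int retries = 100;

	m->ol_flags |= PKT_TX_IEEE1588_TMST;
	if (rte_eth_tx_burst(port_id, queue_id, &m, 1) != 1) {
		rte_pktmbuf_free(m);
		return -1;
	}

	while (retries-- > 0) {
		if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0) {
			printf("Tx timestamp: %ld.%09ld\n",
			       (long)ts.tv_sec, (long)ts.tv_nsec);
			return 0;
		}
		rte_delay_us_sleep(10);
	}

	return -1;
}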

Signed-off-by: Simei Su <simei.su@intel.com>

---
v2:
* Change the patchset to a single patch, based on the shared code update.
* Change the per-device Rx timestamp offload to a per-queue offload (a
  configuration sketch follows below).
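
A sketch (not part of the patch) of how an application could request the
offload on just one queue under this per-queue model; setup_ts_queue() is an
illustrative name and error handling is minimal:

#include <errno.h>

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Sketch only: request the Rx timestamp offload on a single queue. */
static int
setup_ts_queue(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
	       struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (!(dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP))
		return -ENOTSUP;

	rxconf = dev_info.default_rxconf;
	rxconf.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;

	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mp);
}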

 drivers/net/ice/ice_ethdev.c | 228 ++++++++++++++++++++++++++++++++++++++++++-
 drivers/net/ice/ice_ethdev.h |   5 +
 drivers/net/ice/ice_rxtx.c   |  35 ++++++-
 drivers/net/ice/ice_rxtx.h   |   1 +
 4 files changed, 266 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8d62b84..3dc7d40 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -18,6 +18,7 @@
 #include "base/ice_flow.h"
 #include "base/ice_dcb.h"
 #include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
 
 #include "rte_pmd_ice.h"
 #include "ice_ethdev.h"
@@ -30,6 +31,8 @@
 #define ICE_PROTO_XTR_ARG         "proto_xtr"
 #define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"
 
+#define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
+
 static const char * const ice_valid_args[] = {
 	ICE_SAFE_MODE_SUPPORT_ARG,
 	ICE_PIPELINE_MODE_SUPPORT_ARG,
@@ -141,6 +144,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp,
+					  uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+				  struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+				   const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
@@ -224,6 +239,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 	.tx_done_cleanup              = ice_tx_done_cleanup,
 	.get_monitor_addr             = ice_get_monitor_addr,
+	.timesync_enable              = ice_timesync_enable,
+	.timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
+	.timesync_adjust_time         = ice_timesync_adjust_time,
+	.timesync_read_time           = ice_timesync_read_time,
+	.timesync_write_time          = ice_timesync_write_time,
+	.timesync_disable             = ice_timesync_disable,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3475,7 +3497,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			DEV_RX_OFFLOAD_QINQ_STRIP |
 			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
 			DEV_RX_OFFLOAD_VLAN_EXTEND |
-			DEV_RX_OFFLOAD_RSS_HASH;
+			DEV_RX_OFFLOAD_RSS_HASH |
+			DEV_RX_OFFLOAD_TIMESTAMP;
 		dev_info->tx_offload_capa |=
 			DEV_TX_OFFLOAD_QINQ_INSERT |
 			DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -3487,7 +3510,7 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
 	}
 
-	dev_info->rx_queue_offload_capa = 0;
+	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
 	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->reta_size = pf->hash_lut_size;
@@ -5287,6 +5310,207 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 }
 
 static int
+ice_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int ret;
+
+	if (hw->func_caps.ts_func_info.src_tmr_owned) {
+		ret = ice_ptp_init_phc(hw);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to initialize PHC\n");
+			return -1;
+		}
+
+		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"Failed to write PHC increment time value\n");
+			return -1;
+		}
+	}
+
+	/* Initialize cycle counters for system time/RX/TX timestamp */
+	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->systime_tc.cc_shift = 0;
+	ad->systime_tc.nsec_mask = 0;
+
+	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->rx_tstamp_tc.cc_shift = 0;
+	ad->rx_tstamp_tc.nsec_mask = 0;
+
+	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->tx_tstamp_tc.cc_shift = 0;
+	ad->tx_tstamp_tc.nsec_mask = 0;
+
+	return 0;
+}
+
+static uint64_t
+ice_read_time(struct ice_hw *hw)
+{
+	uint32_t hi, lo, lo2;
+	uint64_t time;
+
+	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+
+	if (lo2 < lo) {
+		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	}
+
+	time = ((uint64_t)hi << 32) | lo;
+
+	return time;
+}
+
+static uint64_t
+ice_tstamp_convert_32b_64b(uint64_t time, uint64_t timestamp)
+{
+	const uint64_t mask = 0xFFFFFFFF;
+	uint32_t delta;
+	uint64_t ns;
+
+	delta = (timestamp - (uint32_t)(time & mask));
+
+	if (delta > (mask / 2)) {
+		delta = ((uint32_t)(time & mask) - timestamp);
+		ns = time - delta;
+	} else {
+		ns = time + delta;
+	}
+
+	return ns;
+}
+
+static int
+ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp, uint32_t flags)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct ice_rx_queue *rxq;
+	uint32_t ts_high;
+	uint64_t time, ts_ns, ns;
+
+	rxq = dev->data->rx_queues[flags];
+
+	time = ice_read_time(hw);
+
+	ts_high = rxq->time_high;
+	ts_ns = ice_tstamp_convert_32b_64b(time, ts_high);
+	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint8_t lport;
+	uint64_t time, ts_ns, ns, tstamp;
+	const uint64_t mask = 0xFFFFFFFF;
+	int ret;
+
+	lport = hw->port_info->lport;
+
+	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to read phy timestamp\n");
+		return -1;
+	}
+
+	time = ice_read_time(hw);
+
+	ts_ns = ice_tstamp_convert_32b_64b(time, (tstamp >> 8) & mask);
+	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	ad->systime_tc.nsec += delta;
+	ad->rx_tstamp_tc.nsec += delta;
+	ad->tx_tstamp_tc.nsec += delta;
+
+	return 0;
+}
+
+static int
+ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t ns;
+
+	ns = rte_timespec_to_ns(ts);
+
+	ad->systime_tc.nsec = ns;
+	ad->rx_tstamp_tc.nsec = ns;
+	ad->tx_tstamp_tc.nsec = ns;
+
+	return 0;
+}
+
+static int
+ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t time, ns;
+
+	time = ice_read_time(hw);
+	ns = rte_timecounter_update(&ad->systime_tc, time);
+	*ts = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint64_t val;
+	uint8_t lport;
+
+	lport = hw->port_info->lport;
+
+	dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+
+	ice_clear_phy_tstamp(hw, lport, 0);
+
+	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
+	val &= ~GLTSYN_ENA_TSYN_ENA_M;
+	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
+
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
+
+	return 0;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index b4bf651..1c7c8ea 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -6,6 +6,7 @@
 #define _ICE_ETHDEV_H_
 
 #include <rte_kvargs.h>
+#include <rte_time.h>
 
 #include <ethdev_driver.h>
 
@@ -486,6 +487,10 @@ struct ice_adapter {
 	struct ice_devargs devargs;
 	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
 	uint16_t fdir_ref_cnt;
+	/* For PTP */
+	struct rte_timecounter systime_tc;
+	struct rte_timecounter rx_tstamp_tc;
+	struct rte_timecounter tx_tstamp_tc;
 #ifdef RTE_ARCH_X86
 	bool rx_use_avx2;
 	bool rx_use_avx512;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 5d7ab4f..6b6149f 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -354,6 +354,10 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
+	/* Enable timestamp bit in the queue context */
+	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+		regval |= QRXFLXP_CNTXT_TS_M;
+
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
 
 	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
@@ -689,6 +693,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	tx_ctx.tso_ena = 1; /* tso enable */
 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+	tx_ctx.tsyn_ena = 1;
 
 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
@@ -1589,6 +1594,13 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 
+			if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+				rxq->time_high =
+				   rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+				mb->timesync = rxq->queue_id;
+				pkt_flags |= PKT_RX_IEEE1588_PTP;
+			}
+
 			mb->ol_flags |= pkt_flags;
 		}
 
@@ -1882,6 +1894,13 @@ ice_recv_scattered_pkts(void *rx_queue,
 		ice_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+			rxq->time_high = rxd.wb.flex_ts.ts_high;
+			first_seg->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		first_seg->ol_flags |= pkt_flags;
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -2288,6 +2307,13 @@ ice_recv_pkts(void *rx_queue,
 		ice_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+			rxq->time_high = rxd.wb.flex_ts.ts_high;
+			rxm->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		rxm->ol_flags |= pkt_flags;
 		/* copy old mbuf to rx_pkts */
 		rx_pkts[nb_rx++] = rxm;
@@ -2499,7 +2525,8 @@ ice_calc_context_desc(uint64_t flags)
 	static uint64_t mask = PKT_TX_TCP_SEG |
 		PKT_TX_QINQ |
 		PKT_TX_OUTER_IP_CKSUM |
-		PKT_TX_TUNNEL_MASK;
+		PKT_TX_TUNNEL_MASK |
+		PKT_TX_IEEE1588_TMST;
 
 	return (flags & mask) ? 1 : 0;
 }
@@ -2667,6 +2694,12 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			if (ol_flags & PKT_TX_TCP_SEG)
 				cd_type_cmd_tso_mss |=
 					ice_set_tso_ctx(tx_pkt, tx_offload);
+			else {
+				if (ol_flags & PKT_TX_IEEE1588_TMST)
+					cd_type_cmd_tso_mss |=
+					   ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+					    ICE_TXD_CTX_QW1_CMD_S);
+			}
 
 			ctx_txd->tunneling_params =
 				rte_cpu_to_le_32(cd_tunneling_params);
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index b10db08..ae0b436 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,6 +89,7 @@ struct ice_rx_queue {
 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
 	uint64_t offloads;
+	uint32_t time_high; /* High value of the timestamp */
 };
 
 struct ice_tx_entry {
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [dpdk-dev] [PATCH v3] net/ice: support IEEE 1588 PTP for E810
  2021-09-02  1:37 ` [dpdk-dev] [PATCH v2] net/ice: support IEEE 1588 PTP for E810 Simei Su
@ 2021-09-09  1:30   ` Simei Su
  2021-09-22  8:46     ` [dpdk-dev] [PATCH v4] " Simei Su
  0 siblings, 1 reply; 13+ messages in thread
From: Simei Su @ 2021-09-09  1:30 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, haiyue.wang, Simei Su

Add ice support for new ethdev APIs to enable/disable and read/write/adjust
IEEE1588 PTP timestamps. Currently, only the scalar path supports 1588 PTP;
the vector path doesn't.

The example command for running ptpclient is as below:
./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1
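
Not part of the patch, but as a reference, a rough sketch of driving the
read/write/adjust handlers once timesync is enabled (sync_phc_to_host() is an
illustrative name; the servo logic is deliberately trivial):

#include <stdio.h>
#include <time.h>

#include <rte_ethdev.h>

/* Illustrative sketch only: step the device clock to the host clock once,
 * read it back, and apply a small fine adjustment.
 */
static int
sync_phc_to_host(uint16_t port_id)
{
	struct timespec host, dev;
	int ret;

	clock_gettime(CLOCK_REALTIME, &host);
	ret = rte_eth_timesync_write_time(port_id, &host);
	if (ret < 0)
		return ret;

	ret = rte_eth_timesync_read_time(port_id, &dev);
	if (ret < 0)
		return ret;

	printf("device time: %ld.%09ld\n",
	       (long)dev.tv_sec, (long)dev.tv_nsec);

	/* Nudge the device clock forward by 1 us as an example correction. */
	return rte_eth_timesync_adjust_time(port_id, 1000);
}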

Signed-off-by: Simei Su <simei.su@intel.com>
---
v3:
* Rework code to support scalar path only.
* Update the doc/guides/nics/features/ice.ini to add "Timesync" feature.
* Add release notes.

v2:
* Change the patchset to a single patch, based on the shared code update.
* Change the per-device Rx timestamp offload to a per-queue offload.

 doc/guides/nics/features/ice.ini       |   1 +
 doc/guides/rel_notes/release_21_11.rst |   3 +
 drivers/net/ice/ice_ethdev.c           | 228 ++++++++++++++++++++++++++++++++-
 drivers/net/ice/ice_ethdev.h           |   5 +
 drivers/net/ice/ice_rxtx.c             |  35 ++++-
 drivers/net/ice/ice_rxtx.h             |   1 +
 drivers/net/ice/ice_rxtx_vec_common.h  |   3 +
 7 files changed, 273 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini
index e066787..a7978d2 100644
--- a/doc/guides/nics/features/ice.ini
+++ b/doc/guides/nics/features/ice.ini
@@ -43,6 +43,7 @@ Linux                = Y
 Windows              = Y
 x86-32               = Y
 x86-64               = Y
+Timesync             = Y
 
 [rte_flow items]
 ah                   = Y
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d707a55..abfc8ce 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -55,6 +55,9 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel ice driver.**
+
+  * Added timesync API support under scalar path for E810.
 
 Removed Items
 -------------
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8d62b84..3dc7d40 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -18,6 +18,7 @@
 #include "base/ice_flow.h"
 #include "base/ice_dcb.h"
 #include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
 
 #include "rte_pmd_ice.h"
 #include "ice_ethdev.h"
@@ -30,6 +31,8 @@
 #define ICE_PROTO_XTR_ARG         "proto_xtr"
 #define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"
 
+#define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
+
 static const char * const ice_valid_args[] = {
 	ICE_SAFE_MODE_SUPPORT_ARG,
 	ICE_PIPELINE_MODE_SUPPORT_ARG,
@@ -141,6 +144,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp,
+					  uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+				  struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+				   const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
@@ -224,6 +239,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 	.tx_done_cleanup              = ice_tx_done_cleanup,
 	.get_monitor_addr             = ice_get_monitor_addr,
+	.timesync_enable              = ice_timesync_enable,
+	.timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
+	.timesync_adjust_time         = ice_timesync_adjust_time,
+	.timesync_read_time           = ice_timesync_read_time,
+	.timesync_write_time          = ice_timesync_write_time,
+	.timesync_disable             = ice_timesync_disable,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3475,7 +3497,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			DEV_RX_OFFLOAD_QINQ_STRIP |
 			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
 			DEV_RX_OFFLOAD_VLAN_EXTEND |
-			DEV_RX_OFFLOAD_RSS_HASH;
+			DEV_RX_OFFLOAD_RSS_HASH |
+			DEV_RX_OFFLOAD_TIMESTAMP;
 		dev_info->tx_offload_capa |=
 			DEV_TX_OFFLOAD_QINQ_INSERT |
 			DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -3487,7 +3510,7 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
 	}
 
-	dev_info->rx_queue_offload_capa = 0;
+	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
 	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->reta_size = pf->hash_lut_size;
@@ -5287,6 +5310,207 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 }
 
 static int
+ice_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int ret;
+
+	if (hw->func_caps.ts_func_info.src_tmr_owned) {
+		ret = ice_ptp_init_phc(hw);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to initialize PHC\n");
+			return -1;
+		}
+
+		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"Failed to write PHC increment time value\n");
+			return -1;
+		}
+	}
+
+	/* Initialize cycle counters for system time/RX/TX timestamp */
+	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->systime_tc.cc_shift = 0;
+	ad->systime_tc.nsec_mask = 0;
+
+	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->rx_tstamp_tc.cc_shift = 0;
+	ad->rx_tstamp_tc.nsec_mask = 0;
+
+	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->tx_tstamp_tc.cc_shift = 0;
+	ad->tx_tstamp_tc.nsec_mask = 0;
+
+	return 0;
+}
+
+static uint64_t
+ice_read_time(struct ice_hw *hw)
+{
+	uint32_t hi, lo, lo2;
+	uint64_t time;
+
+	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+
+	if (lo2 < lo) {
+		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	}
+
+	time = ((uint64_t)hi << 32) | lo;
+
+	return time;
+}
+
+static uint64_t
+ice_tstamp_convert_32b_64b(uint64_t time, uint64_t timestamp)
+{
+	const uint64_t mask = 0xFFFFFFFF;
+	uint32_t delta;
+	uint64_t ns;
+
+	delta = (timestamp - (uint32_t)(time & mask));
+
+	if (delta > (mask / 2)) {
+		delta = ((uint32_t)(time & mask) - timestamp);
+		ns = time - delta;
+	} else {
+		ns = time + delta;
+	}
+
+	return ns;
+}
+
+static int
+ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp, uint32_t flags)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct ice_rx_queue *rxq;
+	uint32_t ts_high;
+	uint64_t time, ts_ns, ns;
+
+	rxq = dev->data->rx_queues[flags];
+
+	time = ice_read_time(hw);
+
+	ts_high = rxq->time_high;
+	ts_ns = ice_tstamp_convert_32b_64b(time, ts_high);
+	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint8_t lport;
+	uint64_t time, ts_ns, ns, tstamp;
+	const uint64_t mask = 0xFFFFFFFF;
+	int ret;
+
+	lport = hw->port_info->lport;
+
+	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to read phy timestamp\n");
+		return -1;
+	}
+
+	time = ice_read_time(hw);
+
+	ts_ns = ice_tstamp_convert_32b_64b(time, (tstamp >> 8) & mask);
+	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	ad->systime_tc.nsec += delta;
+	ad->rx_tstamp_tc.nsec += delta;
+	ad->tx_tstamp_tc.nsec += delta;
+
+	return 0;
+}
+
+static int
+ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t ns;
+
+	ns = rte_timespec_to_ns(ts);
+
+	ad->systime_tc.nsec = ns;
+	ad->rx_tstamp_tc.nsec = ns;
+	ad->tx_tstamp_tc.nsec = ns;
+
+	return 0;
+}
+
+static int
+ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t time, ns;
+
+	time = ice_read_time(hw);
+	ns = rte_timecounter_update(&ad->systime_tc, time);
+	*ts = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint64_t val;
+	uint8_t lport;
+
+	lport = hw->port_info->lport;
+
+	dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+
+	ice_clear_phy_tstamp(hw, lport, 0);
+
+	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
+	val &= ~GLTSYN_ENA_TSYN_ENA_M;
+	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
+
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
+
+	return 0;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index b4bf651..1c7c8ea 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -6,6 +6,7 @@
 #define _ICE_ETHDEV_H_
 
 #include <rte_kvargs.h>
+#include <rte_time.h>
 
 #include <ethdev_driver.h>
 
@@ -486,6 +487,10 @@ struct ice_adapter {
 	struct ice_devargs devargs;
 	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
 	uint16_t fdir_ref_cnt;
+	/* For PTP */
+	struct rte_timecounter systime_tc;
+	struct rte_timecounter rx_tstamp_tc;
+	struct rte_timecounter tx_tstamp_tc;
 #ifdef RTE_ARCH_X86
 	bool rx_use_avx2;
 	bool rx_use_avx512;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 5d7ab4f..6b6149f 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -354,6 +354,10 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
+	/* Enable timestamp bit in the queue context */
+	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+		regval |= QRXFLXP_CNTXT_TS_M;
+
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
 
 	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
@@ -689,6 +693,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	tx_ctx.tso_ena = 1; /* tso enable */
 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+	tx_ctx.tsyn_ena = 1;
 
 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
@@ -1589,6 +1594,13 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 
+			if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+				rxq->time_high =
+				   rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+				mb->timesync = rxq->queue_id;
+				pkt_flags |= PKT_RX_IEEE1588_PTP;
+			}
+
 			mb->ol_flags |= pkt_flags;
 		}
 
@@ -1882,6 +1894,13 @@ ice_recv_scattered_pkts(void *rx_queue,
 		ice_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+			rxq->time_high = rxd.wb.flex_ts.ts_high;
+			first_seg->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		first_seg->ol_flags |= pkt_flags;
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -2288,6 +2307,13 @@ ice_recv_pkts(void *rx_queue,
 		ice_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
+			rxq->time_high = rxd.wb.flex_ts.ts_high;
+			rxm->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		rxm->ol_flags |= pkt_flags;
 		/* copy old mbuf to rx_pkts */
 		rx_pkts[nb_rx++] = rxm;
@@ -2499,7 +2525,8 @@ ice_calc_context_desc(uint64_t flags)
 	static uint64_t mask = PKT_TX_TCP_SEG |
 		PKT_TX_QINQ |
 		PKT_TX_OUTER_IP_CKSUM |
-		PKT_TX_TUNNEL_MASK;
+		PKT_TX_TUNNEL_MASK |
+		PKT_TX_IEEE1588_TMST;
 
 	return (flags & mask) ? 1 : 0;
 }
@@ -2667,6 +2694,12 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			if (ol_flags & PKT_TX_TCP_SEG)
 				cd_type_cmd_tso_mss |=
 					ice_set_tso_ctx(tx_pkt, tx_offload);
+			else {
+				if (ol_flags & PKT_TX_IEEE1588_TMST)
+					cd_type_cmd_tso_mss |=
+					   ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+					    ICE_TXD_CTX_QW1_CMD_S);
+			}
 
 			ctx_txd->tunneling_params =
 				rte_cpu_to_le_32(cd_tunneling_params);
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index b10db08..ae0b436 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,6 +89,7 @@ struct ice_rx_queue {
 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
 	uint64_t offloads;
+	uint32_t time_high; /* High value of the timestamp */
 };
 
 struct ice_tx_entry {
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 2d8ef7d..534c906 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -290,6 +290,9 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
 		return ICE_VECTOR_OFFLOAD_PATH;
 
+	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+		return -1;
+
 	return ICE_VECTOR_PATH;
 }
 
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [dpdk-dev] [PATCH v4] net/ice: support IEEE 1588 PTP for E810
  2021-09-09  1:30   ` [dpdk-dev] [PATCH v3] " Simei Su
@ 2021-09-22  8:46     ` Simei Su
  2021-09-26 11:16       ` Zhang, Qi Z
  2021-09-27  8:28       ` [dpdk-dev] [PATCH v5] net/ice: support IEEE 1588 PTP Simei Su
  0 siblings, 2 replies; 13+ messages in thread
From: Simei Su @ 2021-09-22  8:46 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, haiyue.wang, Simei Su

Add ice support for new ethdev APIs to enable/disable and read/write/adjust
IEEE1588 PTP timestamps. Currently, only the scalar path supports 1588 PTP;
the vector path doesn't.

The example command for running ptpclient is as below:
./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1
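
Not part of the patch, but a rough sketch of the call ordering this revision
expects when timesync is enabled after port start (queue setup is elided;
start_then_enable_ptp() is an illustrative name):

#include <rte_ethdev.h>

/* Sketch only: when timesync is enabled after the port has started, the Rx
 * configuration must already carry DEV_RX_OFFLOAD_TIMESTAMP, otherwise
 * rte_eth_timesync_enable() fails.
 */
static int
start_then_enable_ptp(uint16_t port_id, struct rte_eth_conf *conf,
		      uint16_t nb_rxq, uint16_t nb_txq)
{
	int ret;

	conf->rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;

	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, conf);
	if (ret < 0)
		return ret;

	/* ... rte_eth_rx_queue_setup() / rte_eth_tx_queue_setup() ... */

	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;

	return rte_eth_timesync_enable(port_id);
}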

Signed-off-by: Simei Su <simei.su@intel.com>
---
v4:
* Rework code to handle the ordering between ice_dev_start and
  ice_timesync_enable.

v3:
* Rework code to support scalar path only.
* Update the doc/guides/nics/features/ice.ini to add "Timesync" feature.
* Add release notes.

v2:
* Change the patchset to a single patch, based on the shared code update.
* Change the per-device Rx timestamp offload to a per-queue offload.

 doc/guides/nics/features/ice.ini       |   1 +
 doc/guides/rel_notes/release_21_11.rst |   2 +-
 drivers/net/ice/ice_ethdev.c           | 193 +++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_ethdev.h           |   6 +
 drivers/net/ice/ice_rxtx.c             |  46 +++++++-
 5 files changed, 245 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini
index e066787..a7978d2 100644
--- a/doc/guides/nics/features/ice.ini
+++ b/doc/guides/nics/features/ice.ini
@@ -43,6 +43,7 @@ Linux                = Y
 Windows              = Y
 x86-32               = Y
 x86-64               = Y
+Timesync             = Y
 
 [rte_flow items]
 ah                   = Y
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 1b9dac6..2005262 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -71,7 +71,7 @@ New Features
 
   Added 1PPS out support by a devargs.
   * Added Rx timstamp support by dynamic mbuf on Flex Descriptor.
-
+  * Added timesync API support under scalar path for E810.
 
 Removed Items
 -------------
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 06adf43..26491c3 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -18,6 +18,7 @@
 #include "base/ice_flow.h"
 #include "base/ice_dcb.h"
 #include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
 
 #include "rte_pmd_ice.h"
 #include "ice_ethdev.h"
@@ -31,6 +32,8 @@
 #define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"
 #define ICE_ONE_PPS_OUT_ARG       "pps_out"
 
+#define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
+
 uint64_t ice_timestamp_dynflag;
 int ice_timestamp_dynfield_offset = -1;
 
@@ -149,6 +152,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp,
+					  uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+				  struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+				   const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
@@ -232,6 +247,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 	.tx_done_cleanup              = ice_tx_done_cleanup,
 	.get_monitor_addr             = ice_get_monitor_addr,
+	.timesync_enable              = ice_timesync_enable,
+	.timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
+	.timesync_adjust_time         = ice_timesync_adjust_time,
+	.timesync_read_time           = ice_timesync_read_time,
+	.timesync_write_time          = ice_timesync_write_time,
+	.timesync_disable             = ice_timesync_disable,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -5488,6 +5510,177 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 }
 
 static int
+ice_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int ret;
+
+	if (!dev->data->dev_started)
+		ad->ptp_ena = 1;
+	else {
+		if (!(dev->data->dev_conf.rxmode.offloads &
+		    DEV_RX_OFFLOAD_TIMESTAMP)) {
+			PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
+			return -1;
+		} else
+			ad->ptp_ena = 1;
+	}
+
+	if (hw->func_caps.ts_func_info.src_tmr_owned) {
+		ret = ice_ptp_init_phc(hw);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to initialize PHC");
+			return -1;
+		}
+
+		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"Failed to write PHC increment time value");
+			return -1;
+		}
+	}
+
+	/* Initialize cycle counters for system time/RX/TX timestamp */
+	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->systime_tc.cc_shift = 0;
+	ad->systime_tc.nsec_mask = 0;
+
+	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->rx_tstamp_tc.cc_shift = 0;
+	ad->rx_tstamp_tc.nsec_mask = 0;
+
+	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->tx_tstamp_tc.cc_shift = 0;
+	ad->tx_tstamp_tc.nsec_mask = 0;
+
+	return 0;
+}
+
+static int
+ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp, uint32_t flags)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct ice_rx_queue *rxq;
+	uint32_t ts_high;
+	uint64_t time, ts_ns, ns;
+
+	rxq = dev->data->rx_queues[flags];
+
+	time = ice_read_time(hw);
+
+	ts_high = rxq->time_high;
+	ts_ns = ice_tstamp_convert_32b_64b(time, ts_high);
+	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint8_t lport;
+	uint64_t time, ts_ns, ns, tstamp;
+	const uint64_t mask = 0xFFFFFFFF;
+	int ret;
+
+	lport = hw->port_info->lport;
+
+	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
+		return -1;
+	}
+
+	time = ice_read_time(hw);
+
+	ts_ns = ice_tstamp_convert_32b_64b(time, (tstamp >> 8) & mask);
+	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	ad->systime_tc.nsec += delta;
+	ad->rx_tstamp_tc.nsec += delta;
+	ad->tx_tstamp_tc.nsec += delta;
+
+	return 0;
+}
+
+static int
+ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t ns;
+
+	ns = rte_timespec_to_ns(ts);
+
+	ad->systime_tc.nsec = ns;
+	ad->rx_tstamp_tc.nsec = ns;
+	ad->tx_tstamp_tc.nsec = ns;
+
+	return 0;
+}
+
+static int
+ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t time, ns;
+
+	time = ice_read_time(hw);
+	ns = rte_timecounter_update(&ad->systime_tc, time);
+	*ts = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint64_t val;
+	uint8_t lport;
+
+	lport = hw->port_info->lport;
+
+	ice_clear_phy_tstamp(hw, lport, 0);
+
+	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
+	val &= ~GLTSYN_ENA_TSYN_ENA_M;
+	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
+
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
+
+	return 0;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index ea9d892..448e186 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -6,6 +6,7 @@
 #define _ICE_ETHDEV_H_
 
 #include <rte_kvargs.h>
+#include <rte_time.h>
 
 #include <ethdev_driver.h>
 
@@ -501,6 +502,11 @@ struct ice_adapter {
 	struct ice_devargs devargs;
 	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
 	uint16_t fdir_ref_cnt;
+	/* For PTP */
+	struct rte_timecounter systime_tc;
+	struct rte_timecounter rx_tstamp_tc;
+	struct rte_timecounter tx_tstamp_tc;
+	bool ptp_ena;
 #ifdef RTE_ARCH_X86
 	bool rx_use_avx2;
 	bool rx_use_avx512;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 717d3f0..5b2aa32 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -270,6 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
 	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
 	uint32_t regval;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	/* Set buffer size as the head split is disabled. */
 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
@@ -366,7 +367,10 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (!ad->ptp_ena) {
+		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+			regval |= QRXFLXP_CNTXT_TS_M;
+	} else
 		regval |= QRXFLXP_CNTXT_TS_M;
 
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
@@ -704,6 +708,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	tx_ctx.tso_ena = 1; /* tso enable */
 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+	tx_ctx.tsyn_ena = 1;
 
 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
@@ -1603,6 +1608,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t time, ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -1660,6 +1666,15 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 				}
 			}
 
+			if (ad->ptp_ena && ((mb->packet_type &
+			    RTE_PTYPE_L2_MASK) ==
+			    RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+				rxq->time_high =
+				   rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+				mb->timesync = rxq->queue_id;
+				pkt_flags |= PKT_RX_IEEE1588_PTP;
+			}
+
 			mb->ol_flags |= pkt_flags;
 		}
 
@@ -1846,6 +1861,7 @@ ice_recv_scattered_pkts(void *rx_queue,
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t time, ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -1971,6 +1987,14 @@ ice_recv_scattered_pkts(void *rx_queue,
 			}
 		}
 
+		if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+		    == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+			rxq->time_high =
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+			first_seg->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		first_seg->ol_flags |= pkt_flags;
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -2329,6 +2353,7 @@ ice_recv_pkts(void *rx_queue,
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t time, ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -2395,6 +2420,14 @@ ice_recv_pkts(void *rx_queue,
 			}
 		}
 
+		if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+		    RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+			rxq->time_high =
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+			rxm->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		rxm->ol_flags |= pkt_flags;
 		/* copy old mbuf to rx_pkts */
 		rx_pkts[nb_rx++] = rxm;
@@ -2606,7 +2639,8 @@ ice_calc_context_desc(uint64_t flags)
 	static uint64_t mask = PKT_TX_TCP_SEG |
 		PKT_TX_QINQ |
 		PKT_TX_OUTER_IP_CKSUM |
-		PKT_TX_TUNNEL_MASK;
+		PKT_TX_TUNNEL_MASK |
+		PKT_TX_IEEE1588_TMST;
 
 	return (flags & mask) ? 1 : 0;
 }
@@ -2774,6 +2808,12 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			if (ol_flags & PKT_TX_TCP_SEG)
 				cd_type_cmd_tso_mss |=
 					ice_set_tso_ctx(tx_pkt, tx_offload);
+			else {
+				if (ol_flags & PKT_TX_IEEE1588_TMST)
+					cd_type_cmd_tso_mss |=
+					   ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+					    ICE_TXD_CTX_QW1_CMD_S);
+			}
 
 			ctx_txd->tunneling_params =
 				rte_cpu_to_le_32(cd_tunneling_params);
@@ -3175,6 +3215,8 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		ad->rx_use_avx512 = false;
 		ad->rx_use_avx2 = false;
 		rx_check_ret = ice_rx_vec_dev_check(dev);
+		if (ad->ptp_ena)
+			rx_check_ret = -1;
 		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->rx_vec_allowed = true;
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [dpdk-dev] [PATCH v4] net/ice: support IEEE 1588 PTP for E810
  2021-09-22  8:46     ` [dpdk-dev] [PATCH v4] " Simei Su
@ 2021-09-26 11:16       ` Zhang, Qi Z
  2021-09-27  8:28       ` [dpdk-dev] [PATCH v5] net/ice: support IEEE 1588 PTP Simei Su
  1 sibling, 0 replies; 13+ messages in thread
From: Zhang, Qi Z @ 2021-09-26 11:16 UTC (permalink / raw)
  To: Su, Simei; +Cc: dev, Wang, Haiyue



> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Wednesday, September 22, 2021 4:47 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH v4] net/ice: support IEEE 1588 PTP for E810

No need to mention E810; net/ice already implies E810.

> 
> Add ice support for new ethdev APIs to enable/disable and read/write/adjust
> IEEE1588 PTP timstamps. Currently, only scalar path supports 1588 PTP, vector
> path doesn't.

timestamps

> 
> The example command for running ptpclient is as below:
> ./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1
> 
> Signed-off-by: Simei Su <simei.su@intel.com>
> ---
> v4:
> * Rework code to consider ice_dev_start and ice_timesync_enable order.
> 
> v3:
> * Rework code to support scalar path only.
> * Update the doc/guides/nics/features/ice.ini to add "Timesync" feature.
> * Add release notes.
> 
> v2:
> * Change patchset to one patch based on share code update.
> * Change per device offload to per queue offload.
> 
>  doc/guides/nics/features/ice.ini       |   1 +
>  doc/guides/rel_notes/release_21_11.rst |   2 +-
>  drivers/net/ice/ice_ethdev.c           | 193
> +++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_ethdev.h           |   6 +
>  drivers/net/ice/ice_rxtx.c             |  46 +++++++-
>  5 files changed, 245 insertions(+), 3 deletions(-)
> 
> diff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini
> index e066787..a7978d2 100644
> --- a/doc/guides/nics/features/ice.ini
> +++ b/doc/guides/nics/features/ice.ini
> @@ -43,6 +43,7 @@ Linux                = Y
>  Windows              = Y
>  x86-32               = Y
>  x86-64               = Y
> +Timesync             = Y
> 
>  [rte_flow items]
>  ah                   = Y
> diff --git a/doc/guides/rel_notes/release_21_11.rst
> b/doc/guides/rel_notes/release_21_11.rst
> index 1b9dac6..2005262 100644
> --- a/doc/guides/rel_notes/release_21_11.rst
> +++ b/doc/guides/rel_notes/release_21_11.rst
> @@ -71,7 +71,7 @@ New Features
> 
>    Added 1PPS out support by a devargs.
>    * Added Rx timstamp support by dynamic mbuf on Flex Descriptor.
> -
> +  * Added timesync API support under scalar path for E810.
> 
>  Removed Items
>  -------------
> diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index
> 06adf43..26491c3 100644
> --- a/drivers/net/ice/ice_ethdev.c
> +++ b/drivers/net/ice/ice_ethdev.c
> @@ -18,6 +18,7 @@
>  #include "base/ice_flow.h"
>  #include "base/ice_dcb.h"
>  #include "base/ice_common.h"
> +#include "base/ice_ptp_hw.h"
> 
>  #include "rte_pmd_ice.h"
>  #include "ice_ethdev.h"
> @@ -31,6 +32,8 @@
>  #define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"
>  #define ICE_ONE_PPS_OUT_ARG       "pps_out"
> 
> +#define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
> +
>  uint64_t ice_timestamp_dynflag;
>  int ice_timestamp_dynfield_offset = -1;
> 
> @@ -149,6 +152,18 @@ static int ice_dev_udp_tunnel_port_add(struct
> rte_eth_dev *dev,
>  			struct rte_eth_udp_tunnel *udp_tunnel);  static int
> ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
>  			struct rte_eth_udp_tunnel *udp_tunnel);
> +static int ice_timesync_enable(struct rte_eth_dev *dev); static int
> +ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
> +					  struct timespec *timestamp,
> +					  uint32_t flags);
> +static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
> +					  struct timespec *timestamp);
> +static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t
> +delta); static int ice_timesync_read_time(struct rte_eth_dev *dev,
> +				  struct timespec *timestamp);
> +static int ice_timesync_write_time(struct rte_eth_dev *dev,
> +				   const struct timespec *timestamp); static int
> +ice_timesync_disable(struct rte_eth_dev *dev);
> 
>  static const struct rte_pci_id pci_id_ice_map[] = {
>  	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID,
> ICE_DEV_ID_E823L_BACKPLANE) }, @@ -232,6 +247,13 @@ static const struct
> eth_dev_ops ice_eth_dev_ops = {
>  	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
>  	.tx_done_cleanup              = ice_tx_done_cleanup,
>  	.get_monitor_addr             = ice_get_monitor_addr,
> +	.timesync_enable              = ice_timesync_enable,
> +	.timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
> +	.timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
> +	.timesync_adjust_time         = ice_timesync_adjust_time,
> +	.timesync_read_time           = ice_timesync_read_time,
> +	.timesync_write_time          = ice_timesync_write_time,
> +	.timesync_disable             = ice_timesync_disable,
>  };
> 
>  /* store statistics names and its offset in stats structure */ @@ -5488,6
> +5510,177 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,  }
> 
>  static int
> +ice_timesync_enable(struct rte_eth_dev *dev) {
> +	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct ice_adapter *ad =
> +			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +	int ret;
> +
> +	if (!dev->data->dev_started)
> +		ad->ptp_ena = 1;

Please move ptp_ena = 1 to the end of the function.
Also, when will this flag be reset?
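
One possible shape, sketched for illustration only (and roughly the direction
the later revisions below take), is to set the flag only after all setup has
succeeded and to clear it again on the disable path:

	/* tail of ice_timesync_enable(), after the cycle counters are set up */
	ad->ptp_ena = 1;
	return 0;

	/* tail of ice_timesync_disable() */
	ad->ptp_ena = 0;
	return 0;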

> +	else {
> +		if (!(dev->data->dev_conf.rxmode.offloads &
> +		    DEV_RX_OFFLOAD_TIMESTAMP)) {
> +			PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
> +			return -1;
> +		} else
> +			ad->ptp_ena = 1;
> +	}
> +
> +	if (hw->func_caps.ts_func_info.src_tmr_owned) {
> +		ret = ice_ptp_init_phc(hw);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Failed to initialize PHC");
> +			return -1;
> +		}
> +
> +		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR,
> +				"Failed to write PHC increment time value");
> +			return -1;
> +		}
> +	}
> +
> +	/* Initialize cycle counters for system time/RX/TX timestamp */
> +	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
> +	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
> +	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
> +
> +	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
> +	ad->systime_tc.cc_shift = 0;
> +	ad->systime_tc.nsec_mask = 0;
> +
> +	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
> +	ad->rx_tstamp_tc.cc_shift = 0;
> +	ad->rx_tstamp_tc.nsec_mask = 0;
> +
> +	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
> +	ad->tx_tstamp_tc.cc_shift = 0;
> +	ad->tx_tstamp_tc.nsec_mask = 0;
> +
> +	return 0;
> +}
> +
> +static int
> +ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
> +			       struct timespec *timestamp, uint32_t flags) {
> +	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct ice_adapter *ad =
> +			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +	struct ice_rx_queue *rxq;
> +	uint32_t ts_high;
> +	uint64_t time, ts_ns, ns;
> +
> +	rxq = dev->data->rx_queues[flags];
> +
> +	time = ice_read_time(hw);
> +
> +	ts_high = rxq->time_high;
> +	ts_ns = ice_tstamp_convert_32b_64b(time, ts_high);
> +	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
> +	*timestamp = rte_ns_to_timespec(ns);
> +
> +	return 0;
> +}
> +
> +static int
> +ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
> +			       struct timespec *timestamp)
> +{
> +	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct ice_adapter *ad =
> +			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +	uint8_t lport;
> +	uint64_t time, ts_ns, ns, tstamp;
> +	const uint64_t mask = 0xFFFFFFFF;
> +	int ret;
> +
> +	lport = hw->port_info->lport;
> +
> +	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
> +		return -1;
> +	}
> +
> +	time = ice_read_time(hw);
> +
> +	ts_ns = ice_tstamp_convert_32b_64b(time, (tstamp >> 8) & mask);
> +	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
> +	*timestamp = rte_ns_to_timespec(ns);
> +
> +	return 0;
> +}
> +
> +static int
> +ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) {
> +	struct ice_adapter *ad =
> +			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +
> +	ad->systime_tc.nsec += delta;
> +	ad->rx_tstamp_tc.nsec += delta;
> +	ad->tx_tstamp_tc.nsec += delta;
> +
> +	return 0;
> +}
> +
> +static int
> +ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec
> +*ts) {
> +	struct ice_adapter *ad =
> +			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +	uint64_t ns;
> +
> +	ns = rte_timespec_to_ns(ts);
> +
> +	ad->systime_tc.nsec = ns;
> +	ad->rx_tstamp_tc.nsec = ns;
> +	ad->tx_tstamp_tc.nsec = ns;
> +
> +	return 0;
> +}
> +
> +static int
> +ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) {
> +	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct ice_adapter *ad =
> +			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +	uint64_t time, ns;
> +
> +	time = ice_read_time(hw);
> +	ns = rte_timecounter_update(&ad->systime_tc, time);
> +	*ts = rte_ns_to_timespec(ns);
> +
> +	return 0;
> +}
> +
> +static int
> +ice_timesync_disable(struct rte_eth_dev *dev) {
> +	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	uint64_t val;
> +	uint8_t lport;
> +
> +	lport = hw->port_info->lport;
> +
> +	ice_clear_phy_tstamp(hw, lport, 0);
> +
> +	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
> +	val &= ~GLTSYN_ENA_TSYN_ENA_M;
> +	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
> +
> +	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
> +	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
> +
> +	return 0;
> +}
> +
> +static int
>  ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
>  	      struct rte_pci_device *pci_dev)
>  {
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index
> ea9d892..448e186 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -6,6 +6,7 @@
>  #define _ICE_ETHDEV_H_
> 
>  #include <rte_kvargs.h>
> +#include <rte_time.h>
> 
>  #include <ethdev_driver.h>
> 
> @@ -501,6 +502,11 @@ struct ice_adapter {
>  	struct ice_devargs devargs;
>  	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
>  	uint16_t fdir_ref_cnt;
> +	/* For PTP */
> +	struct rte_timecounter systime_tc;
> +	struct rte_timecounter rx_tstamp_tc;
> +	struct rte_timecounter tx_tstamp_tc;
> +	bool ptp_ena;
>  #ifdef RTE_ARCH_X86
>  	bool rx_use_avx2;
>  	bool rx_use_avx512;
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index
> 717d3f0..5b2aa32 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -270,6 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
>  	struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
>  	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
>  	uint32_t regval;
> +	struct ice_adapter *ad = rxq->vsi->adapter;
> 
>  	/* Set buffer size as the head split is disabled. */
>  	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - @@ -366,7
> +367,10 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
>  	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
>  		QRXFLXP_CNTXT_RXDID_PRIO_M;
> 
> -	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
> +	if (!ad->ptp_ena) {
> +		if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
> +			regval |= QRXFLXP_CNTXT_TS_M;
> +	} else
>  		regval |= QRXFLXP_CNTXT_TS_M;

Please simplify the above logic.
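
For example, the two branches could collapse into a single check (an
illustrative sketch only, using the identifiers from the hunk above):

	if (ad->ptp_ena || (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP))
		regval |= QRXFLXP_CNTXT_TS_M;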



^ permalink raw reply	[flat|nested] 13+ messages in thread

* [dpdk-dev] [PATCH v5] net/ice: support IEEE 1588 PTP
  2021-09-22  8:46     ` [dpdk-dev] [PATCH v4] " Simei Su
  2021-09-26 11:16       ` Zhang, Qi Z
@ 2021-09-27  8:28       ` Simei Su
  2021-09-28  2:16         ` Zhang, Qi Z
  2021-09-28  6:27         ` [dpdk-dev] [PATCH v6] " Simei Su
  1 sibling, 2 replies; 13+ messages in thread
From: Simei Su @ 2021-09-27  8:28 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, haiyue.wang, Simei Su

Add ice support for new ethdev APIs to enable/disable and read/write/adjust
IEEE1588 PTP timestamps. Currently, only the scalar path supports 1588
PTP; the vector path doesn't.

The example command for running ptpclient is as below:
./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1

Signed-off-by: Simei Su <simei.su@intel.com>
---
v5:
* Refine patch title and commit log.
* Simplify judgment logic in ice_timesync_enable and ice_program_hw_rx_queue.
* Add flag reset in ice_timesync_disable. 

v4:
* Rework code to consider ice_dev_start and ice_timesync_enable order.

v3:
* Rework code to support scalar path only.
* Update the doc/guides/nics/features/ice.ini to add "Timesync" feature.
* Add release notes.

v2:
* Change patchset to one patch based on share code update.
* Change per device offload to per queue offload.

 doc/guides/nics/features/ice.ini       |   1 +
 doc/guides/rel_notes/release_21_11.rst |   1 +
 drivers/net/ice/ice_ethdev.c           | 201 ++++++++++++++++++++++++++++++++-
 drivers/net/ice/ice_ethdev.h           |   6 +
 drivers/net/ice/ice_rxtx.c             |  41 ++++++-
 drivers/net/ice/ice_rxtx.h             |   1 +
 6 files changed, 248 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini
index e066787..a7978d2 100644
--- a/doc/guides/nics/features/ice.ini
+++ b/doc/guides/nics/features/ice.ini
@@ -43,6 +43,7 @@ Linux                = Y
 Windows              = Y
 x86-32               = Y
 x86-64               = Y
+Timesync             = Y
 
 [rte_flow items]
 ah                   = Y
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 8e29833..33527ed 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -82,6 +82,7 @@ New Features
   * Added 1PPS out support by a devargs.
   * Added IPv4 and L4(TCP/UDP/SCTP) checksum hash support in RSS flow.
   * Added DEV_RX_OFFLOAD_TIMESTAMP support.
+  * Added timesync API support under scalar path.
 
 * **Updated Marvell cnxk ethdev driver.**
 
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 6b85f68..0f457dc 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -18,6 +18,7 @@
 #include "base/ice_flow.h"
 #include "base/ice_dcb.h"
 #include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
 
 #include "rte_pmd_ice.h"
 #include "ice_ethdev.h"
@@ -32,6 +33,8 @@
 #define ICE_ONE_PPS_OUT_ARG       "pps_out"
 #define ICE_RX_LOW_LATENCY_ARG    "rx_low_latency"
 
+#define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
+
 uint64_t ice_timestamp_dynflag;
 int ice_timestamp_dynfield_offset = -1;
 
@@ -45,7 +48,6 @@ static const char * const ice_valid_args[] = {
 	NULL
 };
 
-#define NSEC_PER_SEC      1000000000
 #define PPS_OUT_DELAY_NS  1
 
 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
@@ -151,6 +153,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp,
+					  uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+				  struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+				   const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
@@ -234,6 +248,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 	.tx_done_cleanup              = ice_tx_done_cleanup,
 	.get_monitor_addr             = ice_get_monitor_addr,
+	.timesync_enable              = ice_timesync_enable,
+	.timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
+	.timesync_adjust_time         = ice_timesync_adjust_time,
+	.timesync_read_time           = ice_timesync_read_time,
+	.timesync_write_time          = ice_timesync_write_time,
+	.timesync_disable             = ice_timesync_disable,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -5487,6 +5508,184 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 }
 
 static int
+ice_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int ret;
+
+	if (hw->func_caps.ts_func_info.src_tmr_owned) {
+		ret = ice_ptp_init_phc(hw);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to initialize PHC");
+			return -1;
+		}
+
+		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"Failed to write PHC increment time value");
+			return -1;
+		}
+	}
+
+	/* Initialize cycle counters for system time/RX/TX timestamp */
+	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->systime_tc.cc_shift = 0;
+	ad->systime_tc.nsec_mask = 0;
+
+	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->rx_tstamp_tc.cc_shift = 0;
+	ad->rx_tstamp_tc.nsec_mask = 0;
+
+	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->tx_tstamp_tc.cc_shift = 0;
+	ad->tx_tstamp_tc.nsec_mask = 0;
+
+	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
+	    DEV_RX_OFFLOAD_TIMESTAMP)) {
+		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
+		return -1;
+	} else {
+		ad->ptp_ena = 1;
+	}
+
+	return 0;
+}
+
+static int
+ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp, uint32_t flags)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct ice_rx_queue *rxq;
+	uint32_t ts_high;
+	uint64_t ts_ns, ns;
+
+	rxq = dev->data->rx_queues[flags];
+
+	ts_high = rxq->time_high;
+	ts_ns = ice_tstamp_convert_32b_64b(hw, ts_high);
+	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint8_t lport;
+	uint64_t ts_ns, ns, tstamp;
+	const uint64_t mask = 0xFFFFFFFF;
+	int ret;
+
+	lport = hw->port_info->lport;
+
+	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
+		return -1;
+	}
+
+	ts_ns = ice_tstamp_convert_32b_64b(hw, (tstamp >> 8) & mask);
+	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	ad->systime_tc.nsec += delta;
+	ad->rx_tstamp_tc.nsec += delta;
+	ad->tx_tstamp_tc.nsec += delta;
+
+	return 0;
+}
+
+static int
+ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t ns;
+
+	ns = rte_timespec_to_ns(ts);
+
+	ad->systime_tc.nsec = ns;
+	ad->rx_tstamp_tc.nsec = ns;
+	ad->tx_tstamp_tc.nsec = ns;
+
+	return 0;
+}
+
+static int
+ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint32_t hi, lo, lo2;
+	uint64_t time, ns;
+
+	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+
+	if (lo2 < lo) {
+		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	}
+
+	time = ((uint64_t)hi << 32) | lo;
+	ns = rte_timecounter_update(&ad->systime_tc, time);
+	*ts = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t val;
+	uint8_t lport;
+
+	lport = hw->port_info->lport;
+
+	ice_clear_phy_tstamp(hw, lport, 0);
+
+	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
+	val &= ~GLTSYN_ENA_TSYN_ENA_M;
+	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
+
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
+
+	ad->ptp_ena = 0;
+
+	return 0;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 26f5c56..5845f44 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -6,6 +6,7 @@
 #define _ICE_ETHDEV_H_
 
 #include <rte_kvargs.h>
+#include <rte_time.h>
 
 #include <ethdev_driver.h>
 
@@ -502,6 +503,11 @@ struct ice_adapter {
 	struct ice_devargs devargs;
 	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
 	uint16_t fdir_ref_cnt;
+	/* For PTP */
+	struct rte_timecounter systime_tc;
+	struct rte_timecounter rx_tstamp_tc;
+	struct rte_timecounter tx_tstamp_tc;
+	bool ptp_ena;
 #ifdef RTE_ARCH_X86
 	bool rx_use_avx2;
 	bool rx_use_avx512;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index bb75183..7089202 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -270,6 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
 	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
 	uint32_t regval;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	/* Set buffer size as the head split is disabled. */
 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
@@ -366,7 +367,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if ((!ad->ptp_ena && (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)) ||
+	    ad->ptp_ena)
 		regval |= QRXFLXP_CNTXT_TS_M;
 
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
@@ -704,6 +706,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	tx_ctx.tso_ena = 1; /* tso enable */
 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+	tx_ctx.tsyn_ena = 1;
 
 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
@@ -1564,6 +1567,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -1618,6 +1622,14 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 				}
 			}
 
+			if (ad->ptp_ena && ((mb->packet_type &
+			    RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+				rxq->time_high =
+				   rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+				mb->timesync = rxq->queue_id;
+				pkt_flags |= PKT_RX_IEEE1588_PTP;
+			}
+
 			mb->ol_flags |= pkt_flags;
 		}
 
@@ -1804,6 +1816,7 @@ ice_recv_scattered_pkts(void *rx_queue,
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -1926,6 +1939,14 @@ ice_recv_scattered_pkts(void *rx_queue,
 			}
 		}
 
+		if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+		    == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+			rxq->time_high =
+			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+			first_seg->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		first_seg->ol_flags |= pkt_flags;
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -2284,6 +2305,7 @@ ice_recv_pkts(void *rx_queue,
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -2347,6 +2369,14 @@ ice_recv_pkts(void *rx_queue,
 			}
 		}
 
+		if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+		    RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+			rxq->time_high =
+			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+			rxm->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		rxm->ol_flags |= pkt_flags;
 		/* copy old mbuf to rx_pkts */
 		rx_pkts[nb_rx++] = rxm;
@@ -2558,7 +2588,8 @@ ice_calc_context_desc(uint64_t flags)
 	static uint64_t mask = PKT_TX_TCP_SEG |
 		PKT_TX_QINQ |
 		PKT_TX_OUTER_IP_CKSUM |
-		PKT_TX_TUNNEL_MASK;
+		PKT_TX_TUNNEL_MASK |
+		PKT_TX_IEEE1588_TMST;
 
 	return (flags & mask) ? 1 : 0;
 }
@@ -2726,6 +2757,10 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			if (ol_flags & PKT_TX_TCP_SEG)
 				cd_type_cmd_tso_mss |=
 					ice_set_tso_ctx(tx_pkt, tx_offload);
+			else if (ol_flags & PKT_TX_IEEE1588_TMST)
+				cd_type_cmd_tso_mss |=
+					((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+					ICE_TXD_CTX_QW1_CMD_S);
 
 			ctx_txd->tunneling_params =
 				rte_cpu_to_le_32(cd_tunneling_params);
@@ -3127,6 +3162,8 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		ad->rx_use_avx512 = false;
 		ad->rx_use_avx2 = false;
 		rx_check_ret = ice_rx_vec_dev_check(dev);
+		if (ad->ptp_ena)
+			rx_check_ret = -1;
 		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->rx_vec_allowed = true;
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 4c8b6f7..eef76ff 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -92,6 +92,7 @@ struct ice_rx_queue {
 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
 	uint64_t offloads;
+	uint32_t time_high;
 };
 
 struct ice_tx_entry {
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [dpdk-dev] [PATCH v5] net/ice: support IEEE 1588 PTP
  2021-09-27  8:28       ` [dpdk-dev] [PATCH v5] net/ice: support IEEE 1588 PTP Simei Su
@ 2021-09-28  2:16         ` Zhang, Qi Z
  2021-09-28  6:27         ` [dpdk-dev] [PATCH v6] " Simei Su
  1 sibling, 0 replies; 13+ messages in thread
From: Zhang, Qi Z @ 2021-09-28  2:16 UTC (permalink / raw)
  To: Su, Simei; +Cc: dev, Wang, Haiyue



> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Monday, September 27, 2021 4:28 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH v5] net/ice: support IEEE 1588 PTP
> 
> Add ice support for new ethdev APIs to enable/disable and read/write/adjust
> IEEE1588 PTP timestamps. Currently, only the scalar path supports 1588
> PTP; the vector path doesn't.
> 
> The example command for running ptpclient is as below:
> ./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1
> 
> Signed-off-by: Simei Su <simei.su@intel.com>
> ---
> v5:
> * Refine patch title and commit log.
> * Simplify judgment logic in ice_timesync_enable and ice_program_hw_rx_queue.
> * Add flag reset in ice_timesync_disable.
> 
> v4:
> * Rework code to consider ice_dev_start and ice_timesync_enable order.
> 
> v3:
> * Rework code to support scalar path only.
> * Update the doc/guides/nics/features/ice.ini to add "Timesync" feature.
> * Add release notes.
> 
> v2:
> * Change patchset to one patch based on share code update.
> * Change per device offload to per queue offload.
> 
>  doc/guides/nics/features/ice.ini       |   1 +
>  doc/guides/rel_notes/release_21_11.rst |   1 +
>  drivers/net/ice/ice_ethdev.c           | 201
> ++++++++++++++++++++++++++++++++-
>  drivers/net/ice/ice_ethdev.h           |   6 +
>  drivers/net/ice/ice_rxtx.c             |  41 ++++++-
>  drivers/net/ice/ice_rxtx.h             |   1 +
>  6 files changed, 248 insertions(+), 3 deletions(-)
> 
......
> 
>  static int
> +ice_timesync_enable(struct rte_eth_dev *dev) {
> +	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct ice_adapter *ad =
> +			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +	int ret;
> +
> +	if (hw->func_caps.ts_func_info.src_tmr_owned) {
> +		ret = ice_ptp_init_phc(hw);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Failed to initialize PHC");
> +			return -1;
> +		}
> +
> +		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR,
> +				"Failed to write PHC increment time value");
> +			return -1;
> +		}
> +	}
> +
> +	/* Initialize cycle counters for system time/RX/TX timestamp */
> +	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
> +	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
> +	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
> +
> +	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
> +	ad->systime_tc.cc_shift = 0;
> +	ad->systime_tc.nsec_mask = 0;
> +
> +	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
> +	ad->rx_tstamp_tc.cc_shift = 0;
> +	ad->rx_tstamp_tc.nsec_mask = 0;
> +
> +	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
> +	ad->tx_tstamp_tc.cc_shift = 0;
> +	ad->tx_tstamp_tc.nsec_mask = 0;
> +
> +	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
> +	    DEV_RX_OFFLOAD_TIMESTAMP)) {
> +		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
> +		return -1;
> +	} else {
> +		ad->ptp_ena = 1;
> +	}

No need "else" branch if already return in "if" branch.
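
That is, once the error path has returned, the assignment can simply follow
unconditionally (illustrative sketch; in v6 below the assignment ends up at
the end of the function):

	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_TIMESTAMP)) {
		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
		return -1;
	}

	/* only reached when the check above passed, so no "else" is needed */
	ad->ptp_ena = 1;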

> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index
> bb75183..7089202 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -270,6 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
>  	struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
>  	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
>  	uint32_t regval;
> +	struct ice_adapter *ad = rxq->vsi->adapter;
> 
>  	/* Set buffer size as the head split is disabled. */
>  	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - @@ -366,7
> +367,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
>  	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
>  		QRXFLXP_CNTXT_RXDID_PRIO_M;
> 
> -	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
> +	if ((!ad->ptp_ena && (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)) ||
> +	    ad->ptp_ena)

It can be simplified to "ptp_ena || (offloads & TIMESTAMP)".



^ permalink raw reply	[flat|nested] 13+ messages in thread

* [dpdk-dev] [PATCH v6] net/ice: support IEEE 1588 PTP
  2021-09-27  8:28       ` [dpdk-dev] [PATCH v5] net/ice: support IEEE 1588 PTP Simei Su
  2021-09-28  2:16         ` Zhang, Qi Z
@ 2021-09-28  6:27         ` Simei Su
  2021-09-28 11:13           ` Zhang, Qi Z
  1 sibling, 1 reply; 13+ messages in thread
From: Simei Su @ 2021-09-28  6:27 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, haiyue.wang, Simei Su

Add ice support for new ethdev APIs to enable/disable and read/write/adjust
IEEE1588 PTP timestamps. Currently, only the scalar path supports 1588
PTP; the vector path doesn't.

The example command for running ptpclient is as below:
./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1
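
For reference, a minimal application-side call sequence for these ethdev APIs
looks roughly as follows. This is an illustrative sketch only (not part of the
patch): return values are unchecked, a single port is assumed, and the full
flow lives in examples/ptpclient.

#include <time.h>
#include <rte_ethdev.h>

static void
ptp_timesync_sketch(uint16_t port_id)
{
	struct timespec ts;

	/* When enabling on a started port, DEV_RX_OFFLOAD_TIMESTAMP must
	 * already be configured (see ice_timesync_enable below). */
	rte_eth_timesync_enable(port_id);

	/* Device time can be read, written and adjusted in nanoseconds. */
	rte_eth_timesync_read_time(port_id, &ts);
	rte_eth_timesync_adjust_time(port_id, 1000);

	/* For an Rx packet flagged PKT_RX_IEEE1588_PTP, this driver takes
	 * 'flags' as the Rx queue index (stored in mbuf->timesync). */
	rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);

	/* For a Tx packet sent with PKT_TX_IEEE1588_TMST set. */
	rte_eth_timesync_read_tx_timestamp(port_id, &ts);

	rte_eth_timesync_disable(port_id);
}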

Signed-off-by: Simei Su <simei.su@intel.com>
---
v6:
* Further simplify the judgment logic in ice_program_hw_rx_queue.
* Remove the "else" branch since the "if" branch already returns.

v5:
* Refine patch title and commit log.
* Simplify judgment logic in ice_timesync_enable and ice_program_hw_rx_queue.
* Add flag reset in ice_timesync_disable. 

v4:
* Rework code to consider ice_dev_start and ice_timesync_enable order.

v3:
* Rework code to support scalar path only.
* Update the doc/guides/nics/features/ice.ini to add "Timesync" feature.
* Add release notes.

v2:
* Change patchset to one patch based on share code update.
* Change per device offload to per queue offload.

 doc/guides/nics/features/ice.ini       |   1 +
 doc/guides/rel_notes/release_21_11.rst |   1 +
 drivers/net/ice/ice_ethdev.c           | 201 ++++++++++++++++++++++++++++++++-
 drivers/net/ice/ice_ethdev.h           |   6 +
 drivers/net/ice/ice_rxtx.c             |  40 ++++++-
 drivers/net/ice/ice_rxtx.h             |   1 +
 6 files changed, 247 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini
index e066787..a7978d2 100644
--- a/doc/guides/nics/features/ice.ini
+++ b/doc/guides/nics/features/ice.ini
@@ -43,6 +43,7 @@ Linux                = Y
 Windows              = Y
 x86-32               = Y
 x86-64               = Y
+Timesync             = Y
 
 [rte_flow items]
 ah                   = Y
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 8e29833..33527ed 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -82,6 +82,7 @@ New Features
   * Added 1PPS out support by a devargs.
   * Added IPv4 and L4(TCP/UDP/SCTP) checksum hash support in RSS flow.
   * Added DEV_RX_OFFLOAD_TIMESTAMP support.
+  * Added timesync API support under scalar path.
 
 * **Updated Marvell cnxk ethdev driver.**
 
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 6b85f68..ea3b5c0 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -18,6 +18,7 @@
 #include "base/ice_flow.h"
 #include "base/ice_dcb.h"
 #include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
 
 #include "rte_pmd_ice.h"
 #include "ice_ethdev.h"
@@ -32,6 +33,8 @@
 #define ICE_ONE_PPS_OUT_ARG       "pps_out"
 #define ICE_RX_LOW_LATENCY_ARG    "rx_low_latency"
 
+#define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
+
 uint64_t ice_timestamp_dynflag;
 int ice_timestamp_dynfield_offset = -1;
 
@@ -45,7 +48,6 @@ static const char * const ice_valid_args[] = {
 	NULL
 };
 
-#define NSEC_PER_SEC      1000000000
 #define PPS_OUT_DELAY_NS  1
 
 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
@@ -151,6 +153,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp,
+					  uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+				  struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+				   const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
@@ -234,6 +248,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 	.tx_done_cleanup              = ice_tx_done_cleanup,
 	.get_monitor_addr             = ice_get_monitor_addr,
+	.timesync_enable              = ice_timesync_enable,
+	.timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
+	.timesync_adjust_time         = ice_timesync_adjust_time,
+	.timesync_read_time           = ice_timesync_read_time,
+	.timesync_write_time          = ice_timesync_write_time,
+	.timesync_disable             = ice_timesync_disable,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -5487,6 +5508,184 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 }
 
 static int
+ice_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int ret;
+
+	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
+	    DEV_RX_OFFLOAD_TIMESTAMP)) {
+		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
+		return -1;
+	}
+
+	if (hw->func_caps.ts_func_info.src_tmr_owned) {
+		ret = ice_ptp_init_phc(hw);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to initialize PHC");
+			return -1;
+		}
+
+		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+		if (ret) {
+			PMD_DRV_LOG(ERR,
+				"Failed to write PHC increment time value");
+			return -1;
+		}
+	}
+
+	/* Initialize cycle counters for system time/RX/TX timestamp */
+	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->systime_tc.cc_shift = 0;
+	ad->systime_tc.nsec_mask = 0;
+
+	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->rx_tstamp_tc.cc_shift = 0;
+	ad->rx_tstamp_tc.nsec_mask = 0;
+
+	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+	ad->tx_tstamp_tc.cc_shift = 0;
+	ad->tx_tstamp_tc.nsec_mask = 0;
+
+	ad->ptp_ena = 1;
+
+	return 0;
+}
+
+static int
+ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp, uint32_t flags)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct ice_rx_queue *rxq;
+	uint32_t ts_high;
+	uint64_t ts_ns, ns;
+
+	rxq = dev->data->rx_queues[flags];
+
+	ts_high = rxq->time_high;
+	ts_ns = ice_tstamp_convert_32b_64b(hw, ts_high);
+	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint8_t lport;
+	uint64_t ts_ns, ns, tstamp;
+	const uint64_t mask = 0xFFFFFFFF;
+	int ret;
+
+	lport = hw->port_info->lport;
+
+	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
+		return -1;
+	}
+
+	ts_ns = ice_tstamp_convert_32b_64b(hw, (tstamp >> 8) & mask);
+	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
+	*timestamp = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	ad->systime_tc.nsec += delta;
+	ad->rx_tstamp_tc.nsec += delta;
+	ad->tx_tstamp_tc.nsec += delta;
+
+	return 0;
+}
+
+static int
+ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t ns;
+
+	ns = rte_timespec_to_ns(ts);
+
+	ad->systime_tc.nsec = ns;
+	ad->rx_tstamp_tc.nsec = ns;
+	ad->tx_tstamp_tc.nsec = ns;
+
+	return 0;
+}
+
+static int
+ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint32_t hi, lo, lo2;
+	uint64_t time, ns;
+
+	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+
+	if (lo2 < lo) {
+		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
+		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
+	}
+
+	time = ((uint64_t)hi << 32) | lo;
+	ns = rte_timecounter_update(&ad->systime_tc, time);
+	*ts = rte_ns_to_timespec(ns);
+
+	return 0;
+}
+
+static int
+ice_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *ad =
+			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint64_t val;
+	uint8_t lport;
+
+	lport = hw->port_info->lport;
+
+	ice_clear_phy_tstamp(hw, lport, 0);
+
+	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
+	val &= ~GLTSYN_ENA_TSYN_ENA_M;
+	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
+
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
+	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
+
+	ad->ptp_ena = 0;
+
+	return 0;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 26f5c56..5845f44 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -6,6 +6,7 @@
 #define _ICE_ETHDEV_H_
 
 #include <rte_kvargs.h>
+#include <rte_time.h>
 
 #include <ethdev_driver.h>
 
@@ -502,6 +503,11 @@ struct ice_adapter {
 	struct ice_devargs devargs;
 	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
 	uint16_t fdir_ref_cnt;
+	/* For PTP */
+	struct rte_timecounter systime_tc;
+	struct rte_timecounter rx_tstamp_tc;
+	struct rte_timecounter tx_tstamp_tc;
+	bool ptp_ena;
 #ifdef RTE_ARCH_X86
 	bool rx_use_avx2;
 	bool rx_use_avx512;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index bb75183..83fb788 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -270,6 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
 	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
 	uint32_t regval;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	/* Set buffer size as the head split is disabled. */
 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
@@ -366,7 +367,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
 		regval |= QRXFLXP_CNTXT_TS_M;
 
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
@@ -704,6 +705,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	tx_ctx.tso_ena = 1; /* tso enable */
 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+	tx_ctx.tsyn_ena = 1;
 
 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
@@ -1564,6 +1566,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -1618,6 +1621,14 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 				}
 			}
 
+			if (ad->ptp_ena && ((mb->packet_type &
+			    RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+				rxq->time_high =
+				   rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+				mb->timesync = rxq->queue_id;
+				pkt_flags |= PKT_RX_IEEE1588_PTP;
+			}
+
 			mb->ol_flags |= pkt_flags;
 		}
 
@@ -1804,6 +1815,7 @@ ice_recv_scattered_pkts(void *rx_queue,
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -1926,6 +1938,14 @@ ice_recv_scattered_pkts(void *rx_queue,
 			}
 		}
 
+		if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+		    == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+			rxq->time_high =
+			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+			first_seg->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		first_seg->ol_flags |= pkt_flags;
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -2284,6 +2304,7 @@ ice_recv_pkts(void *rx_queue,
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -2347,6 +2368,14 @@ ice_recv_pkts(void *rx_queue,
 			}
 		}
 
+		if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+		    RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+			rxq->time_high =
+			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+			rxm->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		rxm->ol_flags |= pkt_flags;
 		/* copy old mbuf to rx_pkts */
 		rx_pkts[nb_rx++] = rxm;
@@ -2558,7 +2587,8 @@ ice_calc_context_desc(uint64_t flags)
 	static uint64_t mask = PKT_TX_TCP_SEG |
 		PKT_TX_QINQ |
 		PKT_TX_OUTER_IP_CKSUM |
-		PKT_TX_TUNNEL_MASK;
+		PKT_TX_TUNNEL_MASK |
+		PKT_TX_IEEE1588_TMST;
 
 	return (flags & mask) ? 1 : 0;
 }
@@ -2726,6 +2756,10 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			if (ol_flags & PKT_TX_TCP_SEG)
 				cd_type_cmd_tso_mss |=
 					ice_set_tso_ctx(tx_pkt, tx_offload);
+			else if (ol_flags & PKT_TX_IEEE1588_TMST)
+				cd_type_cmd_tso_mss |=
+					((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+					ICE_TXD_CTX_QW1_CMD_S);
 
 			ctx_txd->tunneling_params =
 				rte_cpu_to_le_32(cd_tunneling_params);
@@ -3127,6 +3161,8 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		ad->rx_use_avx512 = false;
 		ad->rx_use_avx2 = false;
 		rx_check_ret = ice_rx_vec_dev_check(dev);
+		if (ad->ptp_ena)
+			rx_check_ret = -1;
 		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->rx_vec_allowed = true;
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 4c8b6f7..eef76ff 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -92,6 +92,7 @@ struct ice_rx_queue {
 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
 	uint64_t offloads;
+	uint32_t time_high;
 };
 
 struct ice_tx_entry {
-- 
2.9.5


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [dpdk-dev] [PATCH v6] net/ice: support IEEE 1588 PTP
  2021-09-28  6:27         ` [dpdk-dev] [PATCH v6] " Simei Su
@ 2021-09-28 11:13           ` Zhang, Qi Z
  0 siblings, 0 replies; 13+ messages in thread
From: Zhang, Qi Z @ 2021-09-28 11:13 UTC (permalink / raw)
  To: Su, Simei; +Cc: dev, Wang, Haiyue



> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Tuesday, September 28, 2021 2:28 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH v6] net/ice: support IEEE 1588 PTP
> 
> Add ice support for new ethdev APIs to enable/disable and read/write/adjust
> IEEE1588 PTP timestamps. Currently, only the scalar path supports 1588
> PTP; the vector path doesn't.
> 
> The example command for running ptpclient is as below:
> ./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1
> 
> Signed-off-by: Simei Su <simei.su@intel.com>

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi


^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2021-09-28 11:14 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-06  1:34 [dpdk-dev] [PATCH 0/4] net/ice: support IEEE 1588 Simei Su
2021-08-06  1:34 ` [dpdk-dev] [PATCH 1/4] net/ice/base: add 1588 capability probe Simei Su
2021-08-06  1:34 ` [dpdk-dev] [PATCH 2/4] net/ice/base: add low level functions for device clock control Simei Su
2021-08-06  1:34 ` [dpdk-dev] [PATCH 3/4] net/ice/base: add clock initialization function Simei Su
2021-08-06  1:34 ` [dpdk-dev] [PATCH 4/4] net/ice: support IEEE 1588 PTP Simei Su
2021-09-02  1:37 ` [dpdk-dev] [PATCH v2] net/ice: support IEEE 1588 PTP for E810 Simei Su
2021-09-09  1:30   ` [dpdk-dev] [PATCH v3] " Simei Su
2021-09-22  8:46     ` [dpdk-dev] [PATCH v4] " Simei Su
2021-09-26 11:16       ` Zhang, Qi Z
2021-09-27  8:28       ` [dpdk-dev] [PATCH v5] net/ice: support IEEE 1588 PTP Simei Su
2021-09-28  2:16         ` Zhang, Qi Z
2021-09-28  6:27         ` [dpdk-dev] [PATCH v6] " Simei Su
2021-09-28 11:13           ` Zhang, Qi Z
