* [PATCH 0/3] Add MACsec offload support for ixgbe
@ 2016-12-03 14:59 Tiwei Bie
  2016-12-03 14:59 ` [PATCH 1/3] lib: add MACsec offload flags Tiwei Bie
                   ` (3 more replies)
  0 siblings, 4 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-03 14:59 UTC (permalink / raw)
  To: dev

This patch set adds MACsec offload support to the ixgbe PMD.
testpmd is also updated with commands to exercise the MACsec offload.

Tiwei Bie (3):
  lib: add MACsec offload flags
  net/ixgbe: add MACsec offload support
  app/testpmd: add ixgbe MACsec offload support

 app/test-pmd/cmdline.c                      | 389 +++++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   2 +
 app/test-pmd/macswap.c                      |   2 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   2 +
 drivers/net/ixgbe/ixgbe_ethdev.c            | 436 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  41 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           |  98 +++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 lib/librte_ether/rte_ethdev.h               |   2 +
 lib/librte_mbuf/rte_mbuf.h                  |   5 +
 12 files changed, 986 insertions(+), 7 deletions(-)

-- 
2.7.4


* [PATCH 1/3] lib: add MACsec offload flags
  2016-12-03 14:59 [PATCH 0/3] Add MACsec offload support for ixgbe Tiwei Bie
@ 2016-12-03 14:59 ` Tiwei Bie
  2016-12-03 14:59 ` [PATCH 2/3] net/ixgbe: add MACsec offload support Tiwei Bie
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-03 14:59 UTC (permalink / raw)
  To: dev

These flags will be used by the ixgbe PMD in the following commits.
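
As a rough sketch (not part of this patch) of how an application might
consume the new flags — assuming a configured port "port_id" and an
already allocated mbuf "m", both hypothetical names:

	struct rte_eth_dev_info dev_info;

	/* Check whether the port advertises the new TX capability. */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
		/* Request MACsec insertion for this particular packet. */
		m->ol_flags |= PKT_TX_MACSEC;
	}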

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 2 ++
 lib/librte_mbuf/rte_mbuf.h    | 5 +++++
 2 files changed, 7 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 9678179..25a33e9 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -857,6 +857,7 @@ struct rte_eth_conf {
 #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
 #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
 
 /**
  * TX offload capabilities of a device.
@@ -874,6 +875,7 @@ struct rte_eth_conf {
 #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
 
 /**
  * Ethernet device information
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index ead7c6e..46bb23f 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -182,6 +182,11 @@ extern "C" {
 /* add new TX flags here */
 
 /**
+ * MACsec offload flag.
+ */
+#define PKT_TX_MACSEC        (1ULL << 44)
+
+/**
  * Bits 45:48 used for the tunnel type.
  * When doing Tx offload like TSO or checksum, the HW needs to configure the
  * tunnel type into the HW descriptors.
-- 
2.7.4


* [PATCH 2/3] net/ixgbe: add MACsec offload support
  2016-12-03 14:59 [PATCH 0/3] Add MACsec offload support for ixgbe Tiwei Bie
  2016-12-03 14:59 ` [PATCH 1/3] lib: add MACsec offload flags Tiwei Bie
@ 2016-12-03 14:59 ` Tiwei Bie
  2016-12-03 14:59 ` [PATCH 3/3] app/testpmd: add ixgbe " Tiwei Bie
  2016-12-16  1:43 ` [PATCH v2 0/4] Add MACsec offload support for ixgbe Tiwei Bie
  3 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-03 14:59 UTC (permalink / raw)
  To: dev

MACsec (also known as LinkSec) is a MAC-level encryption/authentication
scheme defined in IEEE 802.1AE that uses symmetric cryptography.
This commit adds MACsec offload support to the ixgbe PMD.
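
For reference, a minimal (hypothetical) configuration sequence using the
new rte_pmd_ixgbe_* API added below could look as follows; the port id,
MAC addresses, PI, AN, PN and key are placeholder values:

	uint8_t local_mac[6]  = { 0 };	/* MAC of the local port */
	uint8_t remote_mac[6] = { 0 };	/* MAC of the peer */
	uint8_t key[16]       = { 0 };	/* 128-bit SA key */

	/* Enable the offload engine: encryption and replay protection on. */
	rte_pmd_ixgbe_macsec_enable(port_id, 1, 1);

	/* One TX SC for the local side, one RX SC for the peer (PI 0x1234). */
	rte_pmd_ixgbe_macsec_config_txsc(port_id, local_mac);
	rte_pmd_ixgbe_macsec_config_rxsc(port_id, remote_mac, 0x1234);

	/* Program SA 0 with AN 0 and a starting PN of 1 in both directions. */
	rte_pmd_ixgbe_macsec_select_txsa(port_id, 0, 0, 1, key);
	rte_pmd_ixgbe_macsec_select_rxsa(port_id, 0, 0, 1, key);

Packets to be protected must then carry the PKT_TX_MACSEC flag in their
mbuf ol_flags (see patch 1/3).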

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c            | 436 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  41 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           |  98 +++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 5 files changed, 582 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index edc9b22..2684097 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -232,6 +232,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
@@ -744,6 +745,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
 			   sizeof(rte_ixgbe_stats_strings[0]))
 
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_untagged)},
+	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_encrypted)},
+	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_protected)},
+	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_octets_encrypted)},
+	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+		out_octets_protected)},
+	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_untagged)},
+	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_badtag)},
+	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_nosci)},
+	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unknownsci)},
+	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+		in_octets_decrypted)},
+	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+		in_octets_validated)},
+	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unchecked)},
+	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_delayed)},
+	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_late)},
+	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_ok)},
+	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_invalid)},
+	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notvalid)},
+	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unusedsa)},
+	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+			   sizeof(rte_ixgbe_macsec_strings[0]))
+
 /* Per-queue statistics */
 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
 	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
@@ -2380,6 +2426,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	    rte_intr_dp_is_en(intr_handle))
 		ixgbe_dev_rxq_interrupt_setup(dev);
 
+	ixgbe_dev_macsec_interrupt_setup(dev);
+
 	/* enable uio/vfio intr/eventfd mapping */
 	rte_intr_enable(intr_handle);
 
@@ -2557,6 +2605,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 static void
 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 			   struct ixgbe_hw_stats *hw_stats,
+			   struct ixgbe_macsec_stats *macsec_stats,
 			   uint64_t *total_missed_rx, uint64_t *total_qbrc,
 			   uint64_t *total_qprc, uint64_t *total_qprdc)
 {
@@ -2726,6 +2775,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 	/* Flow Director Stats registers */
 	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
 	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+
+	/* MACsec Stats registers */
+	macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+	macsec_stats->out_pkts_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+	macsec_stats->out_pkts_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+	macsec_stats->out_octets_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+	macsec_stats->out_octets_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+	macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+	macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+	macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+	macsec_stats->in_pkts_unknownsci +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+	macsec_stats->in_octets_decrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+	macsec_stats->in_octets_validated +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+	macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+	macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+	macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+	for (i = 0; i < 2; i++) {
+		macsec_stats->in_pkts_ok +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+		macsec_stats->in_pkts_invalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+		macsec_stats->in_pkts_notvalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+	}
+	macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+	macsec_stats->in_pkts_notusingsa +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
 }
 
 /*
@@ -2738,6 +2821,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i;
 
@@ -2746,8 +2832,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-			&total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	if (stats == NULL)
 		return;
@@ -2799,7 +2885,7 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 /* This function calculates the number of xstats based on the current config */
 static unsigned
 ixgbe_xstats_calc_num(void) {
-	return IXGBE_NB_HW_STATS +
+	return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
 		(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
 		(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
 }
@@ -2826,6 +2912,15 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 			count++;
 		}
 
+		/* MACsec Stats */
+		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"%s",
+				rte_ixgbe_macsec_strings[i].name);
+			count++;
+		}
+
 		/* RX Priority Stats */
 		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2875,6 +2970,9 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i, stat, count = 0;
 
@@ -2888,8 +2986,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-				   &total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	/* If this is a reset xstats is NULL, and we have cleared the
 	 * registers by reading them.
@@ -2905,6 +3003,13 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		count++;
 	}
 
+	/* MACsec Stats */
+	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+		xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
+				rte_ixgbe_macsec_strings[i].offset);
+		count++;
+	}
+
 	/* RX Priority Stats */
 	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2932,6 +3037,9 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw_stats *stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 
 	unsigned count = ixgbe_xstats_calc_num();
 
@@ -2940,6 +3048,7 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 
 	/* Reset software totals */
 	memset(stats, 0, sizeof(*stats));
+	memset(macsec_stats, 0, sizeof(*macsec_stats));
 }
 
 static void
@@ -3179,13 +3288,15 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
 				DEV_RX_OFFLOAD_IPV4_CKSUM |
 				DEV_RX_OFFLOAD_UDP_CKSUM  |
-				DEV_RX_OFFLOAD_TCP_CKSUM;
+				DEV_RX_OFFLOAD_TCP_CKSUM  |
+				DEV_RX_OFFLOAD_MACSEC_STRIP;
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM  |
 				DEV_TX_OFFLOAD_UDP_CKSUM   |
 				DEV_TX_OFFLOAD_TCP_CKSUM   |
 				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+				DEV_TX_OFFLOAD_TCP_TSO     |
+				DEV_TX_OFFLOAD_MACSEC_INSERT;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3378,6 +3489,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/**
+ * It clears the interrupt causes and enables the MACsec interrupt.
+ * It is called only once during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	intr->mask |= IXGBE_EICR_LINKSEC;
+
+	return 0;
+}
+
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
  *
@@ -3412,6 +3545,9 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 	if (eicr & IXGBE_EICR_MAILBOX)
 		intr->flags |= IXGBE_FLAG_MAILBOX;
 
+	if (eicr & IXGBE_EICR_LINKSEC)
+		intr->flags |= IXGBE_FLAG_MACSEC;
+
 	if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
 	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
 	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
@@ -3562,6 +3698,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	}
 
+	if (intr->flags & IXGBE_FLAG_MACSEC) {
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_QUEUE_STATE,
+					      NULL);
+		intr->flags &= ~IXGBE_FLAG_MACSEC;
+	}
+
 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
 	ixgbe_enable_intr(dev);
 	rte_intr_enable(&(dev->pci_dev->intr_handle));
@@ -7592,6 +7734,286 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	ixgbevf_dev_interrupt_action(dev);
 }
 
+static void
+ixgbe_dev_macsec_start_data_paths(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw;
+	uint32_t ctrl;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Start the data paths */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl &= ~IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl &= ~IXGBE_SECRXCTRL_RX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+}
+
+static void
+ixgbe_dev_macsec_stop_data_paths(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw;
+	uint32_t ctrl;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_RX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Wait for hardware to empty the data paths */
+	while (true) {
+		ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+		if (ctrl & IXGBE_SECTXSTAT_SECTX_RDY)
+			break;
+	}
+
+	while (true) {
+		ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+		if (ctrl & IXGBE_SECRXSTAT_SECRX_RDY)
+			break;
+	}
+}
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ixgbe_dev_macsec_stop_data_paths(dev);
+
+	/* Enable Ethernet CRC (required by MACsec offload) */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+	/* Enable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+	ctrl |= 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+	/* Enable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+		     IXGBE_LSECTXCTRL_AUTH;
+	ctrl |= IXGBE_LSECTXCTRL_AISCI;
+	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+	if (rp)
+		ctrl |= IXGBE_LSECRXCTRL_RP;
+	else
+		ctrl &= ~IXGBE_LSECRXCTRL_RP;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	ixgbe_dev_macsec_start_data_paths(dev);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ixgbe_dev_macsec_stop_data_paths(dev);
+
+	/* Disable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Disable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	ixgbe_dev_macsec_start_data_paths(dev);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+	ctrl = mac[4] | (mac[5] << 8);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+	pi = rte_cpu_to_be_16(pi);
+	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Set the PN and key */
+	pn = rte_cpu_to_be_32(pn);
+	if (idx == 0) {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+		}
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+		}
+	}
+
+	/* Set AN and select the SA */
+	ctrl = (an << idx * 2) | (idx << 4);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN */
+	pn = rte_cpu_to_be_32(pn);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+	/* Set the key */
+	for (i = 0; i < 4; i++) {
+		ctrl = (key[i * 4 + 0] <<  0) |
+		       (key[i * 4 + 1] <<  8) |
+		       (key[i * 4 + 2] << 16) |
+		       (key[i * 4 + 3] << 24);
+		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+	}
+
+	/* Set the AN and validate the SA */
+	ctrl = an | (1 << 2);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+	return 0;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 4ff6338..ccff0ba 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -43,6 +43,7 @@
 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
 #define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
 #define IXGBE_FLAG_PHY_INTERRUPT    (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
 
 /*
  * Defines that were not part of ixgbe_type.h as they are not used by the
@@ -130,6 +131,10 @@
 #define IXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define IXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
 
+#define IXGBE_SECTX_MINSECIFG_MASK      0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH            0xFFFFFE00
+
 /*
  * Information about the fdir mode.
  */
@@ -265,11 +270,44 @@ struct ixgbe_filter_info {
 };
 
 /*
+ * Statistics counters collected by the MACsec engine
+ */
+struct ixgbe_macsec_stats {
+	/* TX port statistics */
+	uint64_t out_pkts_untagged;
+	uint64_t out_pkts_encrypted;
+	uint64_t out_pkts_protected;
+	uint64_t out_octets_encrypted;
+	uint64_t out_octets_protected;
+
+	/* RX port statistics */
+	uint64_t in_pkts_untagged;
+	uint64_t in_pkts_badtag;
+	uint64_t in_pkts_nosci;
+	uint64_t in_pkts_unknownsci;
+	uint64_t in_octets_decrypted;
+	uint64_t in_octets_validated;
+
+	/* RX SC statistics */
+	uint64_t in_pkts_unchecked;
+	uint64_t in_pkts_delayed;
+	uint64_t in_pkts_late;
+
+	/* RX SA statistics */
+	uint64_t in_pkts_ok;
+	uint64_t in_pkts_invalid;
+	uint64_t in_pkts_notvalid;
+	uint64_t in_pkts_unusedsa;
+	uint64_t in_pkts_notusingsa;
+};
+
+/*
  * Structure to store private data for each driver instance (for each port).
  */
 struct ixgbe_adapter {
 	struct ixgbe_hw             hw;
 	struct ixgbe_hw_stats       stats;
+	struct ixgbe_macsec_stats   macsec_stats;
 	struct ixgbe_hw_fdir_info   fdir;
 	struct ixgbe_interrupt      intr;
 	struct ixgbe_stat_mapping_registers stat_mappings;
@@ -297,6 +335,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->stats)
 
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
 #define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->intr)
 
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index b2d9f45..9201c25 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -85,6 +85,7 @@
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
 		PKT_TX_TCP_SEG |		 \
+		PKT_TX_MACSEC |			 \
 		PKT_TX_OUTER_IP_CKSUM)
 
 #if 1
@@ -519,6 +520,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 		cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
 	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
 		cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+	if (ol_flags & PKT_TX_MACSEC)
+		cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
 	return cmdtype;
 }
 
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index c2fb826..2ba9372 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -183,6 +183,104 @@ int
 rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
 
 /**
+ * Enable MACsec offloading.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param en
+ *    1 - Enable encryption (encrypt and add integrity signature).
+ *    0 - Disable encryption (only add integrity signature).
+ * @param rp
+ *    1 - Enable replay protection.
+ *    0 - Disable replay protection.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offloading.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+
+/**
+ * Configure TX SC (Secure Channel)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+
+/**
+ * Configure RX SC (Secure Channel)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the remote side.
+ * @param pi
+ *   The PI (port identifier) on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable TX SA (Secure Association)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1).
+ * @param an
+ *   The association number on the local side.
+ * @param pn
+ *   The packet number on the local side.
+ * @param key
+ *   The key on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
+ * Enable RX SA (Secure Association)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1)
+ * @param an
+ *   The association number on the remote side.
+ * @param pn
+ *   The packet number on the remote side.
+ * @param key
+ *   The key on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
  * Response sent back to ixgbe driver from user app after callback
  */
 enum rte_pmd_ixgbe_mb_event_rsp {
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 92434f3..5482a0c 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -15,3 +15,14 @@ DPDK_16.11 {
 	rte_pmd_ixgbe_set_vf_vlan_insert;
 	rte_pmd_ixgbe_set_vf_vlan_stripq;
 } DPDK_2.0;
+
+DPDK_17.02 {
+	global:
+
+	rte_pmd_ixgbe_macsec_enable;
+	rte_pmd_ixgbe_macsec_disable;
+	rte_pmd_ixgbe_macsec_config_txsc;
+	rte_pmd_ixgbe_macsec_config_rxsc;
+	rte_pmd_ixgbe_macsec_select_txsa;
+	rte_pmd_ixgbe_macsec_select_rxsa;
+};
-- 
2.7.4


* [PATCH 3/3] app/testpmd: add ixgbe MACsec offload support
  2016-12-03 14:59 [PATCH 0/3] Add MACsec offload support for ixgbe Tiwei Bie
  2016-12-03 14:59 ` [PATCH 1/3] lib: add MACsec offload flags Tiwei Bie
  2016-12-03 14:59 ` [PATCH 2/3] net/ixgbe: add MACsec offload support Tiwei Bie
@ 2016-12-03 14:59 ` Tiwei Bie
  2016-12-16  1:43 ` [PATCH v2 0/4] Add MACsec offload support for ixgbe Tiwei Bie
  3 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-03 14:59 UTC (permalink / raw)
  To: dev

Add testpmd commands to configure the ixgbe MACsec offload:
- set macsec offload
- set macsec sc
- set macsec sa
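
As a hedged illustration of the new command syntax (port 0, placeholder
MAC addresses and key), a testpmd session might look like:

	testpmd> set macsec offload 0 on encrypt on replay-protect on
	testpmd> set macsec sc tx 0 00:11:22:33:44:55 0
	testpmd> set macsec sc rx 0 66:77:88:99:aa:bb 4660
	testpmd> set macsec sa tx 0 0 0 1 000102030405060708090a0b0c0d0e0f
	testpmd> set macsec sa rx 0 0 0 1 000102030405060708090a0b0c0d0e0f
	testpmd> set macsec offload 0 off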

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 app/test-pmd/cmdline.c | 389 +++++++++++++++++++++++++++++++++++++++++++++++++
 app/test-pmd/macfwd.c  |   2 +
 app/test-pmd/macswap.c |   2 +
 app/test-pmd/testpmd.h |   2 +
 app/test-pmd/txonly.c  |   2 +
 5 files changed, 397 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 63b55dc..6d61c88 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -274,6 +274,18 @@ static void cmd_help_long_parsed(void *parsed_result,
 
 			"set vf mac antispoof (port_id) (vf_id) (on|off).\n"
 			"    Set MAC antispoof for a VF from the PF.\n\n"
+
+			"set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)\n"
+			"    Enable MACsec offload.\n\n"
+
+			"set macsec offload (port_id) off\n"
+			"    Disable MACsec offload.\n\n"
+
+			"set macsec sc (tx|rx) (port_id) (mac) (pi)\n"
+			"    Configure MACsec secure connection (SC).\n\n"
+
+			"set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)\n"
+			"    Configure MACsec secure association (SA).\n\n"
 #endif
 
 			"vlan set strip (on|off) (port_id)\n"
@@ -11409,6 +11421,379 @@ cmdline_parse_inst_t cmd_set_vf_mac_addr = {
 		NULL,
 	},
 };
+
+/* MACsec configuration */
+
+/* Common result structure for MACsec offload enable */
+struct cmd_macsec_offload_on_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t on;
+	cmdline_fixed_string_t encrypt;
+	cmdline_fixed_string_t en_on_off;
+	cmdline_fixed_string_t replay_protect;
+	cmdline_fixed_string_t rp_on_off;
+};
+
+/* Common CLI fields for MACsec offload enable */
+cmdline_parse_token_string_t cmd_macsec_offload_on_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_on_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_on_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 offload, "offload");
+cmdline_parse_token_string_t cmd_macsec_offload_on_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_on_on =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 on, "on");
+cmdline_parse_token_string_t cmd_macsec_offload_on_encrypt =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 encrypt, "encrypt");
+cmdline_parse_token_string_t cmd_macsec_offload_on_en_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 en_on_off, "on#off");
+cmdline_parse_token_string_t cmd_macsec_offload_on_replay_protect =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 replay_protect, "replay-protect");
+cmdline_parse_token_string_t cmd_macsec_offload_on_rp_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 rp_on_off, "on#off");
+
+static void
+cmd_set_macsec_offload_on_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_on_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+	int en = (strcmp(res->en_on_off, "on") == 0) ? 1 : 0;
+	int rp = (strcmp(res->rp_on_off, "on") == 0) ? 1 : 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_on = {
+	.f = cmd_set_macsec_offload_on_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload port_id on "
+		"encrypt on|off replay-protect on|off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_on_set,
+		(void *)&cmd_macsec_offload_on_macsec,
+		(void *)&cmd_macsec_offload_on_offload,
+		(void *)&cmd_macsec_offload_on_port_id,
+		(void *)&cmd_macsec_offload_on_on,
+		(void *)&cmd_macsec_offload_on_encrypt,
+		(void *)&cmd_macsec_offload_on_en_on_off,
+		(void *)&cmd_macsec_offload_on_replay_protect,
+		(void *)&cmd_macsec_offload_on_rp_on_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec offload disable */
+struct cmd_macsec_offload_off_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t off;
+};
+
+/* Common CLI fields for MACsec offload disable */
+cmdline_parse_token_string_t cmd_macsec_offload_off_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_off_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_off_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 offload, "offload");
+cmdline_parse_token_string_t cmd_macsec_offload_off_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_off_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 off, "off");
+
+static void
+cmd_set_macsec_offload_off_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_off_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_disable(port_id);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_off = {
+	.f = cmd_set_macsec_offload_off_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload port_id off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_off_set,
+		(void *)&cmd_macsec_offload_off_macsec,
+		(void *)&cmd_macsec_offload_off_offload,
+		(void *)&cmd_macsec_offload_off_port_id,
+		(void *)&cmd_macsec_offload_off_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure connection configure */
+struct cmd_macsec_sc_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sc;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	struct ether_addr mac;
+	uint16_t pi;
+};
+
+/* Common CLI fields for MACsec secure connection configure */
+cmdline_parse_token_string_t cmd_macsec_sc_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sc_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sc_sc =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 sc, "sc");
+cmdline_parse_token_string_t cmd_macsec_sc_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sc_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 port_id, UINT8);
+cmdline_parse_token_etheraddr_t cmd_macsec_sc_mac =
+	TOKEN_ETHERADDR_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 mac);
+cmdline_parse_token_num_t cmd_macsec_sc_pi =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 pi, UINT16);
+
+static void
+cmd_set_macsec_sc_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sc_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_config_txsc(res->port_id,
+				res->mac.addr_bytes) :
+		rte_pmd_ixgbe_macsec_config_rxsc(res->port_id,
+				res->mac.addr_bytes, res->pi);
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sc = {
+	.f = cmd_set_macsec_sc_parsed,
+	.data = NULL,
+	.help_str = "set macsec sc tx|rx port_id mac pi",
+	.tokens = {
+		(void *)&cmd_macsec_sc_set,
+		(void *)&cmd_macsec_sc_macsec,
+		(void *)&cmd_macsec_sc_sc,
+		(void *)&cmd_macsec_sc_tx_rx,
+		(void *)&cmd_macsec_sc_port_id,
+		(void *)&cmd_macsec_sc_mac,
+		(void *)&cmd_macsec_sc_pi,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure association configure */
+struct cmd_macsec_sa_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sa;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	uint8_t idx;
+	uint8_t an;
+	uint32_t pn;
+	cmdline_fixed_string_t key;
+};
+
+/* Common CLI fields for MACsec secure association configure */
+cmdline_parse_token_string_t cmd_macsec_sa_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sa_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sa_sa =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 sa, "sa");
+cmdline_parse_token_string_t cmd_macsec_sa_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sa_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 port_id, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_idx =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 idx, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_an =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 an, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_pn =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 pn, UINT32);
+cmdline_parse_token_string_t cmd_macsec_sa_key =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 key, NULL);
+
+static void
+cmd_set_macsec_sa_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sa_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+	uint8_t key[16] = { 0 };
+	uint8_t xdgt0;
+	uint8_t xdgt1;
+	int key_len;
+	int i;
+
+	key_len = strlen(res->key) / 2;
+	if (key_len > 16)
+		key_len = 16;
+
+	for (i = 0; i < key_len; i++) {
+		xdgt0 = parse_and_check_key_hexa_digit(res->key, (i * 2));
+		if (xdgt0 == 0xFF)
+			return;
+		xdgt1 = parse_and_check_key_hexa_digit(res->key, (i * 2) + 1);
+		if (xdgt1 == 0xFF)
+			return;
+		key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1);
+	}
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_select_txsa(res->port_id,
+			res->idx, res->an, res->pn, key) :
+		rte_pmd_ixgbe_macsec_select_rxsa(res->port_id,
+			res->idx, res->an, res->pn, key);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		printf("invalid idx %d or an %d\n", res->idx, res->an);
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sa = {
+	.f = cmd_set_macsec_sa_parsed,
+	.data = NULL,
+	.help_str = "set macsec sa tx|rx port_id 0|1 an pn key",
+	.tokens = {
+		(void *)&cmd_macsec_sa_set,
+		(void *)&cmd_macsec_sa_macsec,
+		(void *)&cmd_macsec_sa_sa,
+		(void *)&cmd_macsec_sa_tx_rx,
+		(void *)&cmd_macsec_sa_port_id,
+		(void *)&cmd_macsec_sa_idx,
+		(void *)&cmd_macsec_sa_an,
+		(void *)&cmd_macsec_sa_pn,
+		(void *)&cmd_macsec_sa_key,
+		NULL,
+	},
+};
 #endif
 
 /* ******************************************************************************** */
@@ -11576,6 +11961,10 @@ cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_split_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_on,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sc,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sa,
 #endif
 	NULL,
 };
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 86e01de..8ab529b 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -112,6 +112,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 36e139f..855f2f0 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -112,6 +112,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 9c1e703..5d40fc6 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -143,6 +143,8 @@ struct fwd_stream {
 #define TESTPMD_TX_OFFLOAD_INSERT_VLAN       0x0040
 /** Insert double VLAN header in forward engine */
 #define TESTPMD_TX_OFFLOAD_INSERT_QINQ       0x0080
+/** Offload MACsec in forward engine */
+#define TESTPMD_TX_OFFLOAD_MACSEC            0x0100
 
 /**
  * The data structure associated with each port.
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 8513a06..44f0548 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -214,6 +214,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_mbuf_raw_alloc(mbp);
 		if (pkt == NULL) {
-- 
2.7.4


* [PATCH v2 0/4] Add MACsec offload support for ixgbe
  2016-12-03 14:59 [PATCH 0/3] Add MACsec offload support for ixgbe Tiwei Bie
                   ` (2 preceding siblings ...)
  2016-12-03 14:59 ` [PATCH 3/3] app/testpmd: add ixgbe " Tiwei Bie
@ 2016-12-16  1:43 ` Tiwei Bie
  2016-12-16  1:43   ` [PATCH v2 1/4] lib: add MACsec offload flags Tiwei Bie
                     ` (4 more replies)
  3 siblings, 5 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-16  1:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei.dai, xiao.w.wang, konstantin.ananyev, helin.zhang

This patch set adds MACsec offload support to the ixgbe PMD.
testpmd is also updated with commands to exercise the MACsec offload.

v2:
- Update the testpmd documentation;
- Update the release notes;
- Reuse the functions provided by the base code;

Tiwei Bie (4):
  lib: add MACsec offload flags
  net/ixgbe: add MACsec offload support
  app/testpmd: add ixgbe MACsec offload support
  doc: add ixgbe specific APIs

 app/test-pmd/cmdline.c                      | 389 +++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   2 +
 app/test-pmd/macswap.c                      |   2 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   2 +
 doc/guides/rel_notes/release_17_02.rst      |   6 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 ++
 drivers/net/ixgbe/ixgbe_ethdev.c            | 476 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 100 ++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 lib/librte_ether/rte_ethdev.h               |   2 +
 lib/librte_mbuf/rte_mbuf.h                  |   5 +
 14 files changed, 1070 insertions(+), 7 deletions(-)

-- 
2.7.4


* [PATCH v2 1/4] lib: add MACsec offload flags
  2016-12-16  1:43 ` [PATCH v2 0/4] Add MACsec offload support for ixgbe Tiwei Bie
@ 2016-12-16  1:43   ` Tiwei Bie
  2016-12-16  1:43   ` [PATCH v2 2/4] net/ixgbe: add MACsec offload support Tiwei Bie
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-16  1:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei.dai, xiao.w.wang, konstantin.ananyev, helin.zhang

These flags will be used by the ixgbe PMD in the following commits.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 2 ++
 lib/librte_mbuf/rte_mbuf.h    | 5 +++++
 2 files changed, 7 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 9678179..25a33e9 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -857,6 +857,7 @@ struct rte_eth_conf {
 #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
 #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
 
 /**
  * TX offload capabilities of a device.
@@ -874,6 +875,7 @@ struct rte_eth_conf {
 #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
 
 /**
  * Ethernet device information
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index ead7c6e..b2073f0 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -182,6 +182,11 @@ extern "C" {
 /* add new TX flags here */
 
 /**
+ * MACsec offloading flag.
+ */
+#define PKT_TX_MACSEC        (1ULL << 44)
+
+/**
  * Bits 45:48 used for the tunnel type.
  * When doing Tx offload like TSO or checksum, the HW needs to configure the
  * tunnel type into the HW descriptors.
-- 
2.7.4


* [PATCH v2 2/4] net/ixgbe: add MACsec offload support
  2016-12-16  1:43 ` [PATCH v2 0/4] Add MACsec offload support for ixgbe Tiwei Bie
  2016-12-16  1:43   ` [PATCH v2 1/4] lib: add MACsec offload flags Tiwei Bie
@ 2016-12-16  1:43   ` Tiwei Bie
  2016-12-16  1:43   ` [PATCH v2 3/4] app/testpmd: add ixgbe " Tiwei Bie
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-16  1:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei.dai, xiao.w.wang, konstantin.ananyev, helin.zhang

MACsec (also known as LinkSec) is a MAC-level encryption/authentication
scheme defined in IEEE 802.1AE that uses symmetric cryptography.
This commit adds MACsec offload support to the ixgbe PMD.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c            | 476 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 100 ++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 5 files changed, 628 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index edc9b22..6d32798 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -231,6 +231,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 			uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
@@ -744,6 +745,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
 			   sizeof(rte_ixgbe_stats_strings[0]))
 
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_untagged)},
+	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_encrypted)},
+	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_protected)},
+	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_octets_encrypted)},
+	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+		out_octets_protected)},
+	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_untagged)},
+	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_badtag)},
+	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_nosci)},
+	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unknownsci)},
+	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+		in_octets_decrypted)},
+	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+		in_octets_validated)},
+	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unchecked)},
+	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_delayed)},
+	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_late)},
+	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_ok)},
+	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_invalid)},
+	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notvalid)},
+	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unusedsa)},
+	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+			   sizeof(rte_ixgbe_macsec_strings[0]))
+
 /* Per-queue statistics */
 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
 	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
@@ -2380,6 +2426,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	    rte_intr_dp_is_en(intr_handle))
 		ixgbe_dev_rxq_interrupt_setup(dev);
 
+	ixgbe_dev_macsec_interrupt_setup(dev);
+
 	/* enable uio/vfio intr/eventfd mapping */
 	rte_intr_enable(intr_handle);
 
@@ -2557,6 +2605,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 static void
 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 			   struct ixgbe_hw_stats *hw_stats,
+			   struct ixgbe_macsec_stats *macsec_stats,
 			   uint64_t *total_missed_rx, uint64_t *total_qbrc,
 			   uint64_t *total_qprc, uint64_t *total_qprdc)
 {
@@ -2726,6 +2775,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 	/* Flow Director Stats registers */
 	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
 	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+
+	/* MACsec Stats registers */
+	macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+	macsec_stats->out_pkts_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+	macsec_stats->out_pkts_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+	macsec_stats->out_octets_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+	macsec_stats->out_octets_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+	macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+	macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+	macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+	macsec_stats->in_pkts_unknownsci +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+	macsec_stats->in_octets_decrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+	macsec_stats->in_octets_validated +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+	macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+	macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+	macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+	for (i = 0; i < 2; i++) {
+		macsec_stats->in_pkts_ok +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+		macsec_stats->in_pkts_invalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+		macsec_stats->in_pkts_notvalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+	}
+	macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+	macsec_stats->in_pkts_notusingsa +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
 }
 
 /*
@@ -2738,6 +2821,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i;
 
@@ -2746,8 +2832,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-			&total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	if (stats == NULL)
 		return;
@@ -2799,7 +2885,7 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 /* This function calculates the number of xstats based on the current config */
 static unsigned
 ixgbe_xstats_calc_num(void) {
-	return IXGBE_NB_HW_STATS +
+	return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
 		(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
 		(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
 }
@@ -2826,6 +2912,15 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 			count++;
 		}
 
+		/* MACsec Stats */
+		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"%s",
+				rte_ixgbe_macsec_strings[i].name);
+			count++;
+		}
+
 		/* RX Priority Stats */
 		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2875,6 +2970,9 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i, stat, count = 0;
 
@@ -2888,8 +2986,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-				   &total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	/* If this is a reset xstats is NULL, and we have cleared the
 	 * registers by reading them.
@@ -2905,6 +3003,13 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		count++;
 	}
 
+	/* MACsec Stats */
+	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+		xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
+				rte_ixgbe_macsec_strings[i].offset);
+		count++;
+	}
+
 	/* RX Priority Stats */
 	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2932,6 +3037,9 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw_stats *stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 
 	unsigned count = ixgbe_xstats_calc_num();
 
@@ -2940,6 +3048,7 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 
 	/* Reset software totals */
 	memset(stats, 0, sizeof(*stats));
+	memset(macsec_stats, 0, sizeof(*macsec_stats));
 }
 
 static void
@@ -3179,13 +3288,15 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
 				DEV_RX_OFFLOAD_IPV4_CKSUM |
 				DEV_RX_OFFLOAD_UDP_CKSUM  |
-				DEV_RX_OFFLOAD_TCP_CKSUM;
+				DEV_RX_OFFLOAD_TCP_CKSUM  |
+				DEV_RX_OFFLOAD_MACSEC_STRIP;
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM  |
 				DEV_TX_OFFLOAD_UDP_CKSUM   |
 				DEV_TX_OFFLOAD_TCP_CKSUM   |
 				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+				DEV_TX_OFFLOAD_TCP_TSO     |
+				DEV_TX_OFFLOAD_MACSEC_INSERT;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3378,6 +3489,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/**
+ * It enables the MACsec interrupt.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	intr->mask |= IXGBE_EICR_LINKSEC;
+
+	return 0;
+}
+
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
  *
@@ -3412,6 +3545,9 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 	if (eicr & IXGBE_EICR_MAILBOX)
 		intr->flags |= IXGBE_FLAG_MAILBOX;
 
+	if (eicr & IXGBE_EICR_LINKSEC)
+		intr->flags |= IXGBE_FLAG_MACSEC;
+
 	if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
 	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
 	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
@@ -3562,6 +3698,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	}
 
+	if (intr->flags & IXGBE_FLAG_MACSEC) {
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_QUEUE_STATE,
+					      NULL);
+		intr->flags &= ~IXGBE_FLAG_MACSEC;
+	}
+
 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
 	ixgbe_enable_intr(dev);
 	rte_intr_enable(&(dev->pci_dev->intr_handle));
@@ -7592,6 +7734,326 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	ixgbevf_dev_interrupt_action(dev);
 }
 
+/**
+ *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the transmit data path and waits for the HW to internally empty
+ *  the Tx security block
+ **/
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECTX_POLL 40
+
+	int i;
+	int sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
+		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
+			break;
+		/* Use interrupt-safe sleep just in case */
+		usec_delay(1000);
+	}
+
+	/* For informational purposes only */
+	if (i >= IXGBE_MAX_SECTX_POLL)
+		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+			 "path fully disabled.  Continuing with init.\n");
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the transmit data path.
+ **/
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+	uint32_t sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * No Tx equivalent of ixgbe_disable_sec_rx_path is
+	 * implemented in the base code, and the base code
+	 * must not be modified in DPDK, so just call the
+	 * hand-written one directly for now.
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Enable Ethernet CRC (required by MACsec offload) */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+	/* Enable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+	ctrl |= 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+	/* Enable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+		     IXGBE_LSECTXCTRL_AUTH;
+	ctrl |= IXGBE_LSECTXCTRL_AISCI;
+	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+	if (rp)
+		ctrl |= IXGBE_LSECRXCTRL_RP;
+	else
+		ctrl &= ~IXGBE_LSECRXCTRL_RP;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * No Tx equivalent of ixgbe_enable_sec_rx_path is
+	 * implemented in the base code, and the base code
+	 * must not be modified in DPDK, so just call the
+	 * hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * No Tx equivalent of ixgbe_disable_sec_rx_path is
+	 * implemented in the base code, and the base code
+	 * must not be modified in DPDK, so just call the
+	 * hand-written one directly for now.
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Disable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Disable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * No Tx equivalent of ixgbe_enable_sec_rx_path is
+	 * implemented in the base code, and the base code
+	 * must not be modified in DPDK, so just call the
+	 * hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+	ctrl = mac[4] | (mac[5] << 8);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+	pi = rte_cpu_to_be_16(pi);
+	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Set the PN and key */
+	pn = rte_cpu_to_be_32(pn);
+	if (idx == 0) {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+		}
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+		}
+	}
+
+	/* Set AN and select the SA */
+	ctrl = (an << idx * 2) | (idx << 4);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN */
+	pn = rte_cpu_to_be_32(pn);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+	/* Set the key */
+	for (i = 0; i < 4; i++) {
+		ctrl = (key[i * 4 + 0] <<  0) |
+		       (key[i * 4 + 1] <<  8) |
+		       (key[i * 4 + 2] << 16) |
+		       (key[i * 4 + 3] << 24);
+		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+	}
+
+	/* Set the AN and validate the SA */
+	ctrl = an | (1 << 2);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+	return 0;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 4ff6338..f8a0c02 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -43,6 +43,7 @@
 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
 #define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
 #define IXGBE_FLAG_PHY_INTERRUPT    (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
 
 /*
  * Defines that were not part of ixgbe_type.h as they are not used by the
@@ -130,6 +131,10 @@
 #define IXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define IXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
 
+#define IXGBE_SECTX_MINSECIFG_MASK      0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH            0xFFFFFE00
+
 /*
  * Information about the fdir mode.
  */
@@ -265,11 +270,44 @@ struct ixgbe_filter_info {
 };
 
 /*
+ * Statistics counters collected by MACsec
+ */
+struct ixgbe_macsec_stats {
+	/* TX port statistics */
+	uint64_t out_pkts_untagged;
+	uint64_t out_pkts_encrypted;
+	uint64_t out_pkts_protected;
+	uint64_t out_octets_encrypted;
+	uint64_t out_octets_protected;
+
+	/* RX port statistics */
+	uint64_t in_pkts_untagged;
+	uint64_t in_pkts_badtag;
+	uint64_t in_pkts_nosci;
+	uint64_t in_pkts_unknownsci;
+	uint64_t in_octets_decrypted;
+	uint64_t in_octets_validated;
+
+	/* RX SC statistics */
+	uint64_t in_pkts_unchecked;
+	uint64_t in_pkts_delayed;
+	uint64_t in_pkts_late;
+
+	/* RX SA statistics */
+	uint64_t in_pkts_ok;
+	uint64_t in_pkts_invalid;
+	uint64_t in_pkts_notvalid;
+	uint64_t in_pkts_unusedsa;
+	uint64_t in_pkts_notusingsa;
+};
+
+/*
  * Structure to store private data for each driver instance (for each port).
  */
 struct ixgbe_adapter {
 	struct ixgbe_hw             hw;
 	struct ixgbe_hw_stats       stats;
+	struct ixgbe_macsec_stats   macsec_stats;
 	struct ixgbe_hw_fdir_info   fdir;
 	struct ixgbe_interrupt      intr;
 	struct ixgbe_stat_mapping_registers stat_mappings;
@@ -297,6 +335,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->stats)
 
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
 #define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->intr)
 
@@ -442,4 +483,8 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 
 int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op, void *arg);
+
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
 #endif /* _IXGBE_ETHDEV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index b2d9f45..9201c25 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -85,6 +85,7 @@
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
 		PKT_TX_TCP_SEG |		 \
+		PKT_TX_MACSEC |			 \
 		PKT_TX_OUTER_IP_CKSUM)
 
 #if 1
@@ -519,6 +520,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 		cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
 	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
 		cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+	if (ol_flags & PKT_TX_MACSEC)
+		cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
 	return cmdtype;
 }
 
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index c2fb826..968288b 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -183,6 +183,106 @@ int
 rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
 
 /**
+ * Enable MACsec offloading.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param en
+ *    1 - Enable encryption (encrypt and add integrity signature).
+ *    0 - Disable encryption (only add integrity signature).
+ * @param rp
+ *    1 - Enable replay protection.
+ *    0 - Disable replay protection.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offloading.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+
+/**
+ * Configure TX SC (Secure Connection)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+
+/**
+ * Configure RX SC (Secure Connection)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the remote side.
+ * @param pi
+ *   The PI (port identifier) on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable TX SA (Secure Association)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1).
+ * @param an
+ *   The association number on the local side.
+ * @param pn
+ *   The packet number on the local side.
+ * @param key
+ *   The key on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
+ * Enable RX SA (Secure Association)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1).
+ * @param an
+ *   The association number on the remote side.
+ * @param pn
+ *   The packet number on the remote side.
+ * @param key
+ *   The key on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
  * Response sent back to ixgbe driver from user app after callback
  */
 enum rte_pmd_ixgbe_mb_event_rsp {
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 92434f3..5482a0c 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -15,3 +15,14 @@ DPDK_16.11 {
 	rte_pmd_ixgbe_set_vf_vlan_insert;
 	rte_pmd_ixgbe_set_vf_vlan_stripq;
 } DPDK_2.0;
+
+DPDK_17.02 {
+	global:
+
+	rte_pmd_ixgbe_macsec_enable;
+	rte_pmd_ixgbe_macsec_disable;
+	rte_pmd_ixgbe_macsec_config_txsc;
+	rte_pmd_ixgbe_macsec_config_rxsc;
+	rte_pmd_ixgbe_macsec_select_txsa;
+	rte_pmd_ixgbe_macsec_select_rxsa;
+};
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread
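
The six rte_pmd_ixgbe_macsec_* functions declared above are meant to be
called in a fixed order: enable the offload, configure the Tx/Rx secure
connections, then install the secure associations. A minimal usage sketch
follows; it is not part of the patch, the port number, MAC addresses, PI,
AN, PN and key are placeholder values, and most error handling is omitted:

#include <rte_ethdev.h>
#include <rte_pmd_ixgbe.h>

static int
setup_macsec(uint8_t port)
{
	/* Placeholder addresses and key, for illustration only. */
	uint8_t local_mac[6]  = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
	uint8_t remote_mac[6] = { 0x00, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e };
	uint8_t key[16] = { 0 };	/* 128-bit SAK */
	int ret;

	/* Enable the offload with encryption and replay protection. */
	ret = rte_pmd_ixgbe_macsec_enable(port, 1, 1);
	if (ret != 0)
		return ret;

	/* Tx SC uses the local MAC, Rx SC the remote MAC plus its PI. */
	rte_pmd_ixgbe_macsec_config_txsc(port, local_mac);
	rte_pmd_ixgbe_macsec_config_rxsc(port, remote_mac, 1);

	/* Install SA 0 with AN 0 and a starting packet number of 1. */
	rte_pmd_ixgbe_macsec_select_txsa(port, 0, 0, 1, key);
	rte_pmd_ixgbe_macsec_select_rxsa(port, 0, 0, 1, key);

	return 0;
}

Packets that should actually be protected additionally need the
PKT_TX_MACSEC flag set in their ol_flags before they are handed to the
Tx burst function.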

* [PATCH v2 3/4] app/testpmd: add ixgbe MACsec offload support
  2016-12-16  1:43 ` [PATCH v2 0/4] Add MACsec offload support for ixgbe Tiwei Bie
  2016-12-16  1:43   ` [PATCH v2 1/4] lib: add MACsec offload flags Tiwei Bie
  2016-12-16  1:43   ` [PATCH v2 2/4] net/ixgbe: add MACsec offload support Tiwei Bie
@ 2016-12-16  1:43   ` Tiwei Bie
  2016-12-16  1:43   ` [PATCH v2 4/4] doc: add ixgbe specific APIs Tiwei Bie
  2016-12-25 14:57   ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Tiwei Bie
  4 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-16  1:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei.dai, xiao.w.wang, konstantin.ananyev, helin.zhang

- add the 'set macsec offload' command
- add the 'set macsec sc' command
- add the 'set macsec sa' command

Also update the testpmd user guide.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   2 +
 app/test-pmd/macswap.c                      |   2 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   2 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 +++
 6 files changed, 429 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index d03a592..635bb1f 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -274,6 +274,18 @@ static void cmd_help_long_parsed(void *parsed_result,
 
 			"set vf mac antispoof (port_id) (vf_id) (on|off).\n"
 			"    Set MAC antispoof for a VF from the PF.\n\n"
+
+			"set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)\n"
+			"    Enable MACsec offload.\n\n"
+
+			"set macsec offload (port_id) off\n"
+			"    Disable MACsec offload.\n\n"
+
+			"set macsec sc (tx|rx) (port_id) (mac) (pi)\n"
+			"    Configure MACsec secure connection (SC).\n\n"
+
+			"set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)\n"
+			"    Configure MACsec secure association (SA).\n\n"
 #endif
 
 			"vlan set strip (on|off) (port_id)\n"
@@ -11457,6 +11469,379 @@ cmdline_parse_inst_t cmd_set_vf_mac_addr = {
 		NULL,
 	},
 };
+
+/* MACsec configuration */
+
+/* Common result structure for MACsec offload enable */
+struct cmd_macsec_offload_on_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t on;
+	cmdline_fixed_string_t encrypt;
+	cmdline_fixed_string_t en_on_off;
+	cmdline_fixed_string_t replay_protect;
+	cmdline_fixed_string_t rp_on_off;
+};
+
+/* Common CLI fields for MACsec offload enable */
+cmdline_parse_token_string_t cmd_macsec_offload_on_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_on_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_on_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 offload, "offload");
+cmdline_parse_token_num_t cmd_macsec_offload_on_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_on_on =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 on, "on");
+cmdline_parse_token_string_t cmd_macsec_offload_on_encrypt =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 encrypt, "encrypt");
+cmdline_parse_token_string_t cmd_macsec_offload_on_en_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 en_on_off, "on#off");
+cmdline_parse_token_string_t cmd_macsec_offload_on_replay_protect =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 replay_protect, "replay-protect");
+cmdline_parse_token_string_t cmd_macsec_offload_on_rp_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 rp_on_off, "on#off");
+
+static void
+cmd_set_macsec_offload_on_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_on_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+	int en = (strcmp(res->en_on_off, "on") == 0) ? 1 : 0;
+	int rp = (strcmp(res->rp_on_off, "on") == 0) ? 1 : 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_on = {
+	.f = cmd_set_macsec_offload_on_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> on "
+		"encrypt on|off replay-protect on|off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_on_set,
+		(void *)&cmd_macsec_offload_on_macsec,
+		(void *)&cmd_macsec_offload_on_offload,
+		(void *)&cmd_macsec_offload_on_port_id,
+		(void *)&cmd_macsec_offload_on_on,
+		(void *)&cmd_macsec_offload_on_encrypt,
+		(void *)&cmd_macsec_offload_on_en_on_off,
+		(void *)&cmd_macsec_offload_on_replay_protect,
+		(void *)&cmd_macsec_offload_on_rp_on_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec offload disable */
+struct cmd_macsec_offload_off_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t off;
+};
+
+/* Common CLI fields for MACsec offload disable */
+cmdline_parse_token_string_t cmd_macsec_offload_off_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_off_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_off_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 offload, "offload");
+cmdline_parse_token_num_t cmd_macsec_offload_off_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_off_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 off, "off");
+
+static void
+cmd_set_macsec_offload_off_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_off_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_disable(port_id);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_off = {
+	.f = cmd_set_macsec_offload_off_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_off_set,
+		(void *)&cmd_macsec_offload_off_macsec,
+		(void *)&cmd_macsec_offload_off_offload,
+		(void *)&cmd_macsec_offload_off_port_id,
+		(void *)&cmd_macsec_offload_off_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure connection configure */
+struct cmd_macsec_sc_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sc;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	struct ether_addr mac;
+	uint16_t pi;
+};
+
+/* Common CLI fields for MACsec secure connection configure */
+cmdline_parse_token_string_t cmd_macsec_sc_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sc_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sc_sc =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 sc, "sc");
+cmdline_parse_token_string_t cmd_macsec_sc_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sc_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 port_id, UINT8);
+cmdline_parse_token_etheraddr_t cmd_macsec_sc_mac =
+	TOKEN_ETHERADDR_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 mac);
+cmdline_parse_token_num_t cmd_macsec_sc_pi =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 pi, UINT16);
+
+static void
+cmd_set_macsec_sc_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sc_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_config_txsc(res->port_id,
+				res->mac.addr_bytes) :
+		rte_pmd_ixgbe_macsec_config_rxsc(res->port_id,
+				res->mac.addr_bytes, res->pi);
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sc = {
+	.f = cmd_set_macsec_sc_parsed,
+	.data = NULL,
+	.help_str = "set macsec sc tx|rx <port_id> <mac> <pi>",
+	.tokens = {
+		(void *)&cmd_macsec_sc_set,
+		(void *)&cmd_macsec_sc_macsec,
+		(void *)&cmd_macsec_sc_sc,
+		(void *)&cmd_macsec_sc_tx_rx,
+		(void *)&cmd_macsec_sc_port_id,
+		(void *)&cmd_macsec_sc_mac,
+		(void *)&cmd_macsec_sc_pi,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure association configure */
+struct cmd_macsec_sa_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sa;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	uint8_t idx;
+	uint8_t an;
+	uint32_t pn;
+	cmdline_fixed_string_t key;
+};
+
+/* Common CLI fields for MACsec secure association configure */
+cmdline_parse_token_string_t cmd_macsec_sa_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sa_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sa_sa =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 sa, "sa");
+cmdline_parse_token_string_t cmd_macsec_sa_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sa_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 port_id, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_idx =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 idx, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_an =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 an, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_pn =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 pn, UINT32);
+cmdline_parse_token_string_t cmd_macsec_sa_key =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 key, NULL);
+
+static void
+cmd_set_macsec_sa_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sa_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+	uint8_t key[16] = { 0 };
+	uint8_t xdgt0;
+	uint8_t xdgt1;
+	int key_len;
+	int i;
+
+	key_len = strlen(res->key) / 2;
+	if (key_len > 16)
+		key_len = 16;
+
+	for (i = 0; i < key_len; i++) {
+		xdgt0 = parse_and_check_key_hexa_digit(res->key, (i * 2));
+		if (xdgt0 == 0xFF)
+			return;
+		xdgt1 = parse_and_check_key_hexa_digit(res->key, (i * 2) + 1);
+		if (xdgt1 == 0xFF)
+			return;
+		key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1);
+	}
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_select_txsa(res->port_id,
+			res->idx, res->an, res->pn, key) :
+		rte_pmd_ixgbe_macsec_select_rxsa(res->port_id,
+			res->idx, res->an, res->pn, key);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		printf("invalid idx %d or an %d\n", res->idx, res->an);
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sa = {
+	.f = cmd_set_macsec_sa_parsed,
+	.data = NULL,
+	.help_str = "set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>",
+	.tokens = {
+		(void *)&cmd_macsec_sa_set,
+		(void *)&cmd_macsec_sa_macsec,
+		(void *)&cmd_macsec_sa_sa,
+		(void *)&cmd_macsec_sa_tx_rx,
+		(void *)&cmd_macsec_sa_port_id,
+		(void *)&cmd_macsec_sa_idx,
+		(void *)&cmd_macsec_sa_an,
+		(void *)&cmd_macsec_sa_pn,
+		(void *)&cmd_macsec_sa_key,
+		NULL,
+	},
+};
 #endif
 
 /* ******************************************************************************** */
@@ -11624,6 +12009,10 @@ cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_split_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_on,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sc,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sa,
 #endif
 	NULL,
 };
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 86e01de..8ab529b 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -112,6 +112,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 36e139f..855f2f0 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -112,6 +112,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 9c1e703..5d40fc6 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -143,6 +143,8 @@ struct fwd_stream {
 #define TESTPMD_TX_OFFLOAD_INSERT_VLAN       0x0040
 /** Insert double VLAN header in forward engine */
 #define TESTPMD_TX_OFFLOAD_INSERT_QINQ       0x0080
+/** Offload MACsec in forward engine */
+#define TESTPMD_TX_OFFLOAD_MACSEC            0x0100
 
 /**
  * The data structure associated with each port.
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 8513a06..44f0548 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -214,6 +214,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_mbuf_raw_alloc(mbp);
 		if (pkt == NULL) {
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index f1c269a..bf6a483 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -507,6 +507,38 @@ Set mac antispoof for a VF from the PF::
 
    testpmd> set vf mac antispoof  (port_id) (vf_id) (on|off)
 
+set macsec offload
+~~~~~~~~~~~~~~~~~~
+
+Enable/disable MACsec offload::
+
+   testpmd> set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)
+   testpmd> set macsec offload (port_id) off
+
+set macsec sc
+~~~~~~~~~~~~~
+
+Configure MACsec secure connection (SC)::
+
+   testpmd> set macsec sc (tx|rx) (port_id) (mac) (pi)
+
+.. note::
+
+   The pi argument is ignored for tx.
+   Check the NIC Datasheet for hardware limits.
+
+set macsec sa
+~~~~~~~~~~~~~
+
+Configure MACsec secure association (SA)::
+
+   testpmd> set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)
+
+.. note::
+
+   The IDX value must be 0 or 1.
+   Check the NIC Datasheet for hardware limits.
+
 vlan set strip
 ~~~~~~~~~~~~~~
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread
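
As a concrete illustration of the commands added above, a testpmd session
enabling the offload on port 0 might look like the following; the port
number, MAC addresses, PI and key are arbitrary example values, not taken
from the patch:

   testpmd> set macsec offload 0 on encrypt on replay-protect on
   testpmd> set macsec sc tx 0 00:01:02:03:04:05 0
   testpmd> set macsec sc rx 0 00:0a:0b:0c:0d:0e 1
   testpmd> set macsec sa tx 0 0 0 1 00112233445566778899aabbccddeeff
   testpmd> set macsec sa rx 0 0 0 1 00112233445566778899aabbccddeeff

The forwarding engines (macfwd, macswap, txonly) then set PKT_TX_MACSEC on
transmitted packets, because 'set macsec offload ... on' records
TESTPMD_TX_OFFLOAD_MACSEC in the port's tx_ol_flags.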

* [PATCH v2 4/4] doc: add ixgbe specific APIs
  2016-12-16  1:43 ` [PATCH v2 0/4] Add MACsec offload support for ixgbe Tiwei Bie
                     ` (2 preceding siblings ...)
  2016-12-16  1:43   ` [PATCH v2 3/4] app/testpmd: add ixgbe " Tiwei Bie
@ 2016-12-16  1:43   ` Tiwei Bie
  2016-12-16 10:29     ` Mcnamara, John
  2016-12-25 14:57   ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Tiwei Bie
  4 siblings, 1 reply; 79+ messages in thread
From: Tiwei Bie @ 2016-12-16  1:43 UTC (permalink / raw)
  To: dev; +Cc: wenzhuo.lu, wei.dai, xiao.w.wang, konstantin.ananyev, helin.zhang

Add information about the new ixgbe PMD APIs in the release note.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 doc/guides/rel_notes/release_17_02.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 3b65038..c76cf5f 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -39,6 +39,12 @@ New Features
      =========================================================
 
 
+* **Added APIs for MACsec offload support to the ixgbe PMD.**
+
+  Six new APIs have been added to the ixgbe PMD for MACsec offload support.
+  The declarations for the APIs can be found in ``rte_pmd_ixgbe.h``.
+
+
 Resolved Issues
 ---------------
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* Re: [PATCH v2 4/4] doc: add ixgbe specific APIs
  2016-12-16  1:43   ` [PATCH v2 4/4] doc: add ixgbe specific APIs Tiwei Bie
@ 2016-12-16 10:29     ` Mcnamara, John
  0 siblings, 0 replies; 79+ messages in thread
From: Mcnamara, John @ 2016-12-16 10:29 UTC (permalink / raw)
  To: Bie, Tiwei, dev
  Cc: Lu, Wenzhuo, Dai, Wei, Wang, Xiao W, Ananyev, Konstantin, Zhang, Helin



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Tiwei Bie
> Sent: Friday, December 16, 2016 1:44 AM
> To: dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Dai, Wei <wei.dai@intel.com>;
> Wang, Xiao W <xiao.w.wang@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Zhang, Helin <helin.zhang@intel.com>
> Subject: [dpdk-dev] [PATCH v2 4/4] doc: add ixgbe specific APIs
> 
> Add information about the new ixgbe PMD APIs in the release note.
> 
> Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>

Acked-by: John McNamara <john.mcnamara@intel.com>

^ permalink raw reply	[flat|nested] 79+ messages in thread

* [PATCH v3 0/6] Add MACsec offload support for ixgbe
  2016-12-16  1:43 ` [PATCH v2 0/4] Add MACsec offload support for ixgbe Tiwei Bie
                     ` (3 preceding siblings ...)
  2016-12-16  1:43   ` [PATCH v2 4/4] doc: add ixgbe specific APIs Tiwei Bie
@ 2016-12-25 14:57   ` Tiwei Bie
  2016-12-25 14:57     ` [PATCH v3 1/6] mbuf: add flag for MACsec Tiwei Bie
                       ` (7 more replies)
  4 siblings, 8 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-25 14:57 UTC (permalink / raw)
  To: dev
  Cc: wenzhuo.lu, wei.dai, xiao.w.wang, olivier.matz, thomas.monjalon,
	konstantin.ananyev, helin.zhang

This patch set adds the MACsec offload support for ixgbe.
The testpmd is also updated to support MACsec cmds.

v2:
- Update the documents for testpmd;
- Update the release notes;
- Reuse the functions provided by base code;

v3:
- Add the missing parts of MACsec mbuf flag and reorganize the patch set;
- Add an ethdev event type for MACsec;
- Advertise the MACsec offload capabilities based on the mac type;
- Minor fixes and improvements;

Tiwei Bie (6):
  mbuf: add flag for MACsec
  ethdev: add event type for MACsec
  ethdev: add MACsec offload capability flags
  net/ixgbe: add MACsec offload support
  app/testpmd: add MACsec offload commands
  doc: add ixgbe specific APIs

 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   2 +
 app/test-pmd/macswap.c                      |   2 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   2 +
 doc/guides/rel_notes/release_17_02.rst      |  10 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 ++
 drivers/net/ixgbe/ixgbe_ethdev.c            | 481 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 100 ++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 lib/librte_ether/rte_ethdev.h               |   3 +
 lib/librte_mbuf/rte_mbuf.c                  |   2 +
 lib/librte_mbuf/rte_mbuf.h                  |   6 +
 15 files changed, 1085 insertions(+), 5 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 79+ messages in thread

* [PATCH v3 1/6] mbuf: add flag for MACsec
  2016-12-25 14:57   ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Tiwei Bie
@ 2016-12-25 14:57     ` Tiwei Bie
  2016-12-25 14:57     ` [PATCH v3 2/6] ethdev: add event type " Tiwei Bie
                       ` (6 subsequent siblings)
  7 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-25 14:57 UTC (permalink / raw)
  To: dev
  Cc: wenzhuo.lu, wei.dai, xiao.w.wang, olivier.matz, thomas.monjalon,
	konstantin.ananyev, helin.zhang

Add a new Tx flag in mbuf that can be set by applications to
enable the MACsec offload for a packet to be transmitted.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 doc/guides/rel_notes/release_17_02.rst | 5 +++++
 lib/librte_mbuf/rte_mbuf.c             | 2 ++
 lib/librte_mbuf/rte_mbuf.h             | 6 ++++++
 3 files changed, 13 insertions(+)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 180af82..d278b3f 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -52,6 +52,11 @@ New Features
   See the :ref:`Generic flow API <Generic_flow_API>` documentation for more
   information.
 
+* **Improved offload support in mbuf.**
+
+  Added a new Tx MACsec mbuf flag, set by applications to enable the MACsec
+  offload for a packet to be transmitted.
+
 
 Resolved Issues
 ---------------
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index 63f43c8..72ad91e 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -404,6 +404,7 @@ const char *rte_get_tx_ol_flag_name(uint64_t mask)
 	case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
 	case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
 	case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
+	case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
 	default: return NULL;
 	}
 }
@@ -434,6 +435,7 @@ rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
 		  "PKT_TX_TUNNEL_NONE" },
 		{ PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK,
 		  "PKT_TX_TUNNEL_NONE" },
+		{ PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
 	};
 	const char *name;
 	unsigned int i;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index ead7c6e..752dca4 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -182,6 +182,12 @@ extern "C" {
 /* add new TX flags here */
 
 /**
+ * Offload the MACsec. This flag must be set by the application to enable
+ * this offload feature for a packet to be transmitted.
+ */
+#define PKT_TX_MACSEC        (1ULL << 44)
+
+/**
  * Bits 45:48 used for the tunnel type.
  * When doing Tx offload like TSO or checksum, the HW needs to configure the
  * tunnel type into the HW descriptors.
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread
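
For reference, an application that has already configured the MACsec
offload on a port would request it per packet roughly as in the sketch
below (a hypothetical helper, not part of the patch):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Mark an already-built burst for MACsec offload and transmit it. */
static inline uint16_t
tx_burst_macsec(uint8_t port, uint16_t queue,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++)
		pkts[i]->ol_flags |= PKT_TX_MACSEC;

	return rte_eth_tx_burst(port, queue, pkts, nb_pkts);
}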

* [PATCH v3 2/6] ethdev: add event type for MACsec
  2016-12-25 14:57   ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Tiwei Bie
  2016-12-25 14:57     ` [PATCH v3 1/6] mbuf: add flag for MACsec Tiwei Bie
@ 2016-12-25 14:57     ` Tiwei Bie
  2016-12-25 14:57     ` [PATCH v3 3/6] ethdev: add MACsec offload capability flags Tiwei Bie
                       ` (5 subsequent siblings)
  7 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-25 14:57 UTC (permalink / raw)
  To: dev
  Cc: wenzhuo.lu, wei.dai, xiao.w.wang, olivier.matz, thomas.monjalon,
	konstantin.ananyev, helin.zhang

This commit adds the following event type:

- RTE_ETH_EVENT_MACSEC

This event will occur when the PN counter in a MACsec connection
reaches the exhaustion threshold.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 272fd41..b0b2678 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -3043,6 +3043,7 @@ enum rte_eth_event_type {
 	RTE_ETH_EVENT_INTR_RESET,
 			/**< reset interrupt event, sent to VF on PF reset */
 	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
+	RTE_ETH_EVENT_MACSEC,   /**< MACsec offload related event */
 	RTE_ETH_EVENT_MAX       /**< max value of this enum */
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread
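
A sketch of how an application might consume this event is shown below;
the callback body is illustrative only, the commit itself merely defines
the event type:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static void
macsec_event_cb(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
	RTE_SET_USED(param);

	/* PN exhaustion warning: the application is expected to re-key,
	 * e.g. by installing a fresh SA on the Tx side. */
	if (type == RTE_ETH_EVENT_MACSEC)
		printf("MACsec PN threshold reached on port %u\n",
		       (unsigned int)port_id);
}

/* Registered once per port, e.g. right after device configuration. */
static void
register_macsec_event(uint8_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_MACSEC,
				      macsec_event_cb, NULL);
}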

* [PATCH v3 3/6] ethdev: add MACsec offload capability flags
  2016-12-25 14:57   ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Tiwei Bie
  2016-12-25 14:57     ` [PATCH v3 1/6] mbuf: add flag for MACsec Tiwei Bie
  2016-12-25 14:57     ` [PATCH v3 2/6] ethdev: add event type " Tiwei Bie
@ 2016-12-25 14:57     ` Tiwei Bie
  2016-12-25 14:57     ` [PATCH v3 4/6] net/ixgbe: add MACsec offload support Tiwei Bie
                       ` (4 subsequent siblings)
  7 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-25 14:57 UTC (permalink / raw)
  To: dev
  Cc: wenzhuo.lu, wei.dai, xiao.w.wang, olivier.matz, thomas.monjalon,
	konstantin.ananyev, helin.zhang

If these flags are advertised by a PMD, the NIC supports the MACsec
offload. The incoming MACsec traffic can be offloaded transparently
after the MACsec offload is configured correctly by the application,
and the application can set the PKT_TX_MACSEC flag in mbufs to enable
the MACsec offload for the packets to be transmitted.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index b0b2678..faec56f 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -857,6 +857,7 @@ struct rte_eth_conf {
 #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
 #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
 
 /**
  * TX offload capabilities of a device.
@@ -874,6 +875,7 @@ struct rte_eth_conf {
 #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
 
 /**
  * Ethernet device information
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread
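
A small sketch (illustrative only, not part of the patch) of how an
application would test for these capabilities before turning the offload
on:

#include <rte_ethdev.h>

static int
port_supports_macsec(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);

	return (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_MACSEC_STRIP) &&
	       (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT);
}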

* [PATCH v3 4/6] net/ixgbe: add MACsec offload support
  2016-12-25 14:57   ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Tiwei Bie
                       ` (2 preceding siblings ...)
  2016-12-25 14:57     ` [PATCH v3 3/6] ethdev: add MACsec offload capability flags Tiwei Bie
@ 2016-12-25 14:57     ` Tiwei Bie
  2016-12-25 14:57     ` [PATCH v3 5/6] app/testpmd: add MACsec offload commands Tiwei Bie
                       ` (3 subsequent siblings)
  7 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-25 14:57 UTC (permalink / raw)
  To: dev
  Cc: wenzhuo.lu, wei.dai, xiao.w.wang, olivier.matz, thomas.monjalon,
	konstantin.ananyev, helin.zhang

MACsec (or LinkSec) is a MAC-level encryption/authentication scheme
defined in IEEE 802.1AE that uses symmetric cryptography.
This commit adds the MACsec offload support for ixgbe.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c            | 481 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 100 ++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 5 files changed, 635 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index baffc71..a06ed07 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -231,6 +231,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 			uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
@@ -744,6 +745,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
 			   sizeof(rte_ixgbe_stats_strings[0]))
 
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_untagged)},
+	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_encrypted)},
+	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_protected)},
+	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_octets_encrypted)},
+	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+		out_octets_protected)},
+	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_untagged)},
+	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_badtag)},
+	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_nosci)},
+	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unknownsci)},
+	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+		in_octets_decrypted)},
+	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+		in_octets_validated)},
+	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unchecked)},
+	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_delayed)},
+	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_late)},
+	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_ok)},
+	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_invalid)},
+	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notvalid)},
+	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unusedsa)},
+	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+			   sizeof(rte_ixgbe_macsec_strings[0]))
+
 /* Per-queue statistics */
 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
 	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
@@ -2366,6 +2412,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		/* check if lsc interrupt is enabled */
 		if (dev->data->dev_conf.intr_conf.lsc != 0)
 			ixgbe_dev_lsc_interrupt_setup(dev);
+		ixgbe_dev_macsec_interrupt_setup(dev);
 	} else {
 		rte_intr_callback_unregister(intr_handle,
 					     ixgbe_dev_interrupt_handler,
@@ -2557,6 +2604,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 static void
 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 			   struct ixgbe_hw_stats *hw_stats,
+			   struct ixgbe_macsec_stats *macsec_stats,
 			   uint64_t *total_missed_rx, uint64_t *total_qbrc,
 			   uint64_t *total_qprc, uint64_t *total_qprdc)
 {
@@ -2726,6 +2774,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 	/* Flow Director Stats registers */
 	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
 	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+
+	/* MACsec Stats registers */
+	macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+	macsec_stats->out_pkts_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+	macsec_stats->out_pkts_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+	macsec_stats->out_octets_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+	macsec_stats->out_octets_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+	macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+	macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+	macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+	macsec_stats->in_pkts_unknownsci +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+	macsec_stats->in_octets_decrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+	macsec_stats->in_octets_validated +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+	macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+	macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+	macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+	for (i = 0; i < 2; i++) {
+		macsec_stats->in_pkts_ok +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+		macsec_stats->in_pkts_invalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+		macsec_stats->in_pkts_notvalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+	}
+	macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+	macsec_stats->in_pkts_notusingsa +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
 }
 
 /*
@@ -2738,6 +2820,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i;
 
@@ -2746,8 +2831,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-			&total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	if (stats == NULL)
 		return;
@@ -2799,7 +2884,7 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 /* This function calculates the number of xstats based on the current config */
 static unsigned
 ixgbe_xstats_calc_num(void) {
-	return IXGBE_NB_HW_STATS +
+	return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
 		(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
 		(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
 }
@@ -2826,6 +2911,15 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 			count++;
 		}
 
+		/* MACsec Stats */
+		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"%s",
+				rte_ixgbe_macsec_strings[i].name);
+			count++;
+		}
+
 		/* RX Priority Stats */
 		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2875,6 +2969,9 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i, stat, count = 0;
 
@@ -2888,8 +2985,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-				   &total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	/* If this is a reset xstats is NULL, and we have cleared the
 	 * registers by reading them.
@@ -2905,6 +3002,13 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		count++;
 	}
 
+	/* MACsec Stats */
+	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+		xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
+				rte_ixgbe_macsec_strings[i].offset);
+		count++;
+	}
+
 	/* RX Priority Stats */
 	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2932,6 +3036,9 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw_stats *stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 
 	unsigned count = ixgbe_xstats_calc_num();
 
@@ -2940,6 +3047,7 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 
 	/* Reset software totals */
 	memset(stats, 0, sizeof(*stats));
+	memset(macsec_stats, 0, sizeof(*macsec_stats));
 }
 
 static void
@@ -3070,6 +3178,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	    !RTE_ETH_DEV_SRIOV(dev).active)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
 
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3083,6 +3195,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_TX_OFFLOAD_SCTP_CKSUM  |
 		DEV_TX_OFFLOAD_TCP_TSO;
 
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3378,6 +3494,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/**
+ * It enables the MACsec interrupt.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	intr->mask |= IXGBE_EICR_LINKSEC;
+
+	return 0;
+}
+
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
  *
@@ -3412,6 +3550,9 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 	if (eicr & IXGBE_EICR_MAILBOX)
 		intr->flags |= IXGBE_FLAG_MAILBOX;
 
+	if (eicr & IXGBE_EICR_LINKSEC)
+		intr->flags |= IXGBE_FLAG_MACSEC;
+
 	if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
 	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
 	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
@@ -3562,6 +3703,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	}
 
+	if (intr->flags & IXGBE_FLAG_MACSEC) {
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
+					      NULL);
+		intr->flags &= ~IXGBE_FLAG_MACSEC;
+	}
+
 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
 	ixgbe_enable_intr(dev);
 	rte_intr_enable(&(dev->pci_dev->intr_handle));
@@ -7592,6 +7739,330 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	ixgbevf_dev_interrupt_action(dev);
 }
 
+/**
+ *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the transmit data path and waits for the HW to internally empty
+ *  the Tx security block
+ **/
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECTX_POLL 40
+
+	int i;
+	int sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
+		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
+			break;
+		/* Use interrupt-safe sleep just in case */
+		usec_delay(1000);
+	}
+
+	/* For informational purposes only */
+	if (i >= IXGBE_MAX_SECTX_POLL)
+		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+			 "path fully disabled.  Continuing with init.\n");
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the transmit data path.
+ **/
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+	uint32_t sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * No equivalent of ixgbe_disable_sec_rx_path is
+	 * implemented for Tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Enable Ethernet CRC (required by MACsec offload) */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+	/* Enable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+	ctrl |= 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+	/* Enable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+		     IXGBE_LSECTXCTRL_AUTH;
+	ctrl |= IXGBE_LSECTXCTRL_AISCI;
+	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+	if (rp)
+		ctrl |= IXGBE_LSECRXCTRL_RP;
+	else
+		ctrl &= ~IXGBE_LSECRXCTRL_RP;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * No equivalent of ixgbe_enable_sec_rx_path is
+	 * implemented for Tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * No equivalent of ixgbe_disable_sec_rx_path is
+	 * implemented for Tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Disable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Disable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * No equivalent of ixgbe_enable_sec_rx_path is
+	 * implemented for Tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+	ctrl = mac[4] | (mac[5] << 8);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+	pi = rte_cpu_to_be_16(pi);
+	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN and key */
+	pn = rte_cpu_to_be_32(pn);
+	if (idx == 0) {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+		}
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+		}
+	}
+
+	/* Set AN and select the SA */
+	ctrl = (an << idx * 2) | (idx << 4);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN */
+	pn = rte_cpu_to_be_32(pn);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+	/* Set the key */
+	for (i = 0; i < 4; i++) {
+		ctrl = (key[i * 4 + 0] <<  0) |
+		       (key[i * 4 + 1] <<  8) |
+		       (key[i * 4 + 2] << 16) |
+		       (key[i * 4 + 3] << 24);
+		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+	}
+
+	/* Set the AN and validate the SA */
+	ctrl = an | (1 << 2);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+	return 0;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 4ff6338..f8a0c02 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -43,6 +43,7 @@
 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
 #define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
 #define IXGBE_FLAG_PHY_INTERRUPT    (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
 
 /*
  * Defines that were not part of ixgbe_type.h as they are not used by the
@@ -130,6 +131,10 @@
 #define IXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define IXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
 
+#define IXGBE_SECTX_MINSECIFG_MASK      0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH            0xFFFFFE00
+
 /*
  * Information about the fdir mode.
  */
@@ -265,11 +270,44 @@ struct ixgbe_filter_info {
 };
 
 /*
+ * Statistics counters collected by the MACsec engine
+ */
+struct ixgbe_macsec_stats {
+	/* TX port statistics */
+	uint64_t out_pkts_untagged;
+	uint64_t out_pkts_encrypted;
+	uint64_t out_pkts_protected;
+	uint64_t out_octets_encrypted;
+	uint64_t out_octets_protected;
+
+	/* RX port statistics */
+	uint64_t in_pkts_untagged;
+	uint64_t in_pkts_badtag;
+	uint64_t in_pkts_nosci;
+	uint64_t in_pkts_unknownsci;
+	uint64_t in_octets_decrypted;
+	uint64_t in_octets_validated;
+
+	/* RX SC statistics */
+	uint64_t in_pkts_unchecked;
+	uint64_t in_pkts_delayed;
+	uint64_t in_pkts_late;
+
+	/* RX SA statistics */
+	uint64_t in_pkts_ok;
+	uint64_t in_pkts_invalid;
+	uint64_t in_pkts_notvalid;
+	uint64_t in_pkts_unusedsa;
+	uint64_t in_pkts_notusingsa;
+};
+
+/*
  * Structure to store private data for each driver instance (for each port).
  */
 struct ixgbe_adapter {
 	struct ixgbe_hw             hw;
 	struct ixgbe_hw_stats       stats;
+	struct ixgbe_macsec_stats   macsec_stats;
 	struct ixgbe_hw_fdir_info   fdir;
 	struct ixgbe_interrupt      intr;
 	struct ixgbe_stat_mapping_registers stat_mappings;
@@ -297,6 +335,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->stats)
 
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
 #define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->intr)
 
@@ -442,4 +483,8 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 
 int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op, void *arg);
+
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
 #endif /* _IXGBE_ETHDEV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index b2d9f45..9201c25 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -85,6 +85,7 @@
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
 		PKT_TX_TCP_SEG |		 \
+		PKT_TX_MACSEC |			 \
 		PKT_TX_OUTER_IP_CKSUM)
 
 #if 1
@@ -519,6 +520,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 		cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
 	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
 		cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+	if (ol_flags & PKT_TX_MACSEC)
+		cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
 	return cmdtype;
 }
 
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index c2fb826..968288b 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -183,6 +183,106 @@ int
 rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
 
 /**
+ * Enable MACsec offloading.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param en
+ *    1 - Enable encryption (encrypt and add integrity signature).
+ *    0 - Disable encryption (only add integrity signature).
+ * @param rp
+ *    1 - Enable replay protection.
+ *    0 - Disable replay protection.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offloading.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+
+/**
+ * Configure TX SC (Secure Connection)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+
+/**
+ * Configure RX SC (Secure Connection)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the remote side.
+ * @param pi
+ *   The PI (port identifier) on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable TX SA (Secure Association)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1).
+ * @param an
+ *   The association number on the local side.
+ * @param pn
+ *   The packet number on the local side.
+ * @param key
+ *   The key on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
+ * Enable RX SA (Secure Association)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1)
+ * @param an
+ *   The association number on the remote side.
+ * @param pn
+ *   The packet number on the remote side.
+ * @param key
+ *   The key on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
  * Response sent back to ixgbe driver from user app after callback
  */
 enum rte_pmd_ixgbe_mb_event_rsp {
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 92434f3..6d68934 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -15,3 +15,14 @@ DPDK_16.11 {
 	rte_pmd_ixgbe_set_vf_vlan_insert;
 	rte_pmd_ixgbe_set_vf_vlan_stripq;
 } DPDK_2.0;
+
+DPDK_17.02 {
+	global:
+
+	rte_pmd_ixgbe_macsec_enable;
+	rte_pmd_ixgbe_macsec_disable;
+	rte_pmd_ixgbe_macsec_config_txsc;
+	rte_pmd_ixgbe_macsec_config_rxsc;
+	rte_pmd_ixgbe_macsec_select_txsa;
+	rte_pmd_ixgbe_macsec_select_rxsa;
+} DPDK_16.11;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread
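
For readers following the series, below is a minimal sketch (not part of the
patch) of how an application might drive the PMD-specific API declared in
rte_pmd_ixgbe.h above. The port number, MAC addresses, PI/PN values and the
all-zero key are purely illustrative, and the application must be linked
against librte_pmd_ixgbe.

#include <rte_mbuf.h>
#include <rte_pmd_ixgbe.h>

static int
configure_macsec(uint8_t port)
{
	uint8_t local_mac[6]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t remote_mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };
	uint8_t key[16] = { 0 };	/* replace with the negotiated key */
	int ret;

	/* Enable the offload with encryption and replay protection. */
	ret = rte_pmd_ixgbe_macsec_enable(port, 1, 1);
	if (ret < 0)
		return ret;	/* -ENODEV or -ENOTSUP */

	/* Tx SC uses the local MAC; Rx SC uses the remote MAC and PI. */
	rte_pmd_ixgbe_macsec_config_txsc(port, local_mac);
	rte_pmd_ixgbe_macsec_config_rxsc(port, remote_mac, 1);

	/* Program SA 0 with AN 0 and an initial PN in both directions. */
	rte_pmd_ixgbe_macsec_select_txsa(port, 0, 0, 1, key);
	rte_pmd_ixgbe_macsec_select_rxsa(port, 0, 0, 1, key);

	return 0;
}

/* On the Tx path, packets to be protected carry the new mbuf flag. */
static inline void
request_macsec_insert(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_MACSEC;
}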

* [PATCH v3 5/6] app/testpmd: add MACsec offload commands
  2016-12-25 14:57   ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Tiwei Bie
                       ` (3 preceding siblings ...)
  2016-12-25 14:57     ` [PATCH v3 4/6] net/ixgbe: add MACsec offload support Tiwei Bie
@ 2016-12-25 14:57     ` Tiwei Bie
  2016-12-25 14:58     ` [PATCH v3 6/6] doc: add ixgbe specific APIs Tiwei Bie
                       ` (2 subsequent siblings)
  7 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-25 14:57 UTC (permalink / raw)
  To: dev
  Cc: wenzhuo.lu, wei.dai, xiao.w.wang, olivier.matz, thomas.monjalon,
	konstantin.ananyev, helin.zhang

The following MACsec offload commands are added:

- set macsec offload <port_id> on encrypt on|off replay-protect on|off
- set macsec offload <port_id> off
- set macsec sc tx|rx <port_id> <mac> <pi>
- set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>

Also update the testpmd user guide.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   2 +
 app/test-pmd/macswap.c                      |   2 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   2 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 +++
 6 files changed, 429 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index f768b6b..4a67894 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -275,6 +275,18 @@ static void cmd_help_long_parsed(void *parsed_result,
 
 			"set vf mac antispoof (port_id) (vf_id) (on|off).\n"
 			"    Set MAC antispoof for a VF from the PF.\n\n"
+
+			"set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)\n"
+			"    Enable MACsec offload.\n\n"
+
+			"set macsec offload (port_id) off\n"
+			"    Disable MACsec offload.\n\n"
+
+			"set macsec sc (tx|rx) (port_id) (mac) (pi)\n"
+			"    Configure MACsec secure connection (SC).\n\n"
+
+			"set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)\n"
+			"    Configure MACsec secure association (SA).\n\n"
 #endif
 
 			"vlan set strip (on|off) (port_id)\n"
@@ -11488,6 +11500,379 @@ cmdline_parse_inst_t cmd_set_vf_mac_addr = {
 		NULL,
 	},
 };
+
+/* MACsec configuration */
+
+/* Common result structure for MACsec offload enable */
+struct cmd_macsec_offload_on_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t on;
+	cmdline_fixed_string_t encrypt;
+	cmdline_fixed_string_t en_on_off;
+	cmdline_fixed_string_t replay_protect;
+	cmdline_fixed_string_t rp_on_off;
+};
+
+/* Common CLI fields for MACsec offload enable */
+cmdline_parse_token_string_t cmd_macsec_offload_on_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_on_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_on_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 offload, "offload");
+cmdline_parse_token_num_t cmd_macsec_offload_on_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_on_on =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 on, "on");
+cmdline_parse_token_string_t cmd_macsec_offload_on_encrypt =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 encrypt, "encrypt");
+cmdline_parse_token_string_t cmd_macsec_offload_on_en_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 en_on_off, "on#off");
+cmdline_parse_token_string_t cmd_macsec_offload_on_replay_protect =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 replay_protect, "replay-protect");
+cmdline_parse_token_string_t cmd_macsec_offload_on_rp_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 rp_on_off, "on#off");
+
+static void
+cmd_set_macsec_offload_on_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_on_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+	int en = (strcmp(res->en_on_off, "on") == 0) ? 1 : 0;
+	int rp = (strcmp(res->rp_on_off, "on") == 0) ? 1 : 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_on = {
+	.f = cmd_set_macsec_offload_on_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> on "
+		"encrypt on|off replay-protect on|off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_on_set,
+		(void *)&cmd_macsec_offload_on_macsec,
+		(void *)&cmd_macsec_offload_on_offload,
+		(void *)&cmd_macsec_offload_on_port_id,
+		(void *)&cmd_macsec_offload_on_on,
+		(void *)&cmd_macsec_offload_on_encrypt,
+		(void *)&cmd_macsec_offload_on_en_on_off,
+		(void *)&cmd_macsec_offload_on_replay_protect,
+		(void *)&cmd_macsec_offload_on_rp_on_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec offload disable */
+struct cmd_macsec_offload_off_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t off;
+};
+
+/* Common CLI fields for MACsec offload disable */
+cmdline_parse_token_string_t cmd_macsec_offload_off_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_off_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_off_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 offload, "offload");
+cmdline_parse_token_num_t cmd_macsec_offload_off_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_off_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 off, "off");
+
+static void
+cmd_set_macsec_offload_off_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_off_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_disable(port_id);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_off = {
+	.f = cmd_set_macsec_offload_off_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_off_set,
+		(void *)&cmd_macsec_offload_off_macsec,
+		(void *)&cmd_macsec_offload_off_offload,
+		(void *)&cmd_macsec_offload_off_port_id,
+		(void *)&cmd_macsec_offload_off_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure connection configure */
+struct cmd_macsec_sc_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sc;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	struct ether_addr mac;
+	uint16_t pi;
+};
+
+/* Common CLI fields for MACsec secure connection configure */
+cmdline_parse_token_string_t cmd_macsec_sc_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sc_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sc_sc =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 sc, "sc");
+cmdline_parse_token_string_t cmd_macsec_sc_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sc_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 port_id, UINT8);
+cmdline_parse_token_etheraddr_t cmd_macsec_sc_mac =
+	TOKEN_ETHERADDR_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 mac);
+cmdline_parse_token_num_t cmd_macsec_sc_pi =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 pi, UINT16);
+
+static void
+cmd_set_macsec_sc_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sc_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_config_txsc(res->port_id,
+				res->mac.addr_bytes) :
+		rte_pmd_ixgbe_macsec_config_rxsc(res->port_id,
+				res->mac.addr_bytes, res->pi);
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sc = {
+	.f = cmd_set_macsec_sc_parsed,
+	.data = NULL,
+	.help_str = "set macsec sc tx|rx <port_id> <mac> <pi>",
+	.tokens = {
+		(void *)&cmd_macsec_sc_set,
+		(void *)&cmd_macsec_sc_macsec,
+		(void *)&cmd_macsec_sc_sc,
+		(void *)&cmd_macsec_sc_tx_rx,
+		(void *)&cmd_macsec_sc_port_id,
+		(void *)&cmd_macsec_sc_mac,
+		(void *)&cmd_macsec_sc_pi,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure association configure */
+struct cmd_macsec_sa_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sa;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	uint8_t idx;
+	uint8_t an;
+	uint32_t pn;
+	cmdline_fixed_string_t key;
+};
+
+/* Common CLI fields for MACsec secure association configure */
+cmdline_parse_token_string_t cmd_macsec_sa_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sa_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sa_sa =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 sa, "sa");
+cmdline_parse_token_string_t cmd_macsec_sa_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sa_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 port_id, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_idx =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 idx, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_an =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 an, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_pn =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 pn, UINT32);
+cmdline_parse_token_string_t cmd_macsec_sa_key =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 key, NULL);
+
+static void
+cmd_set_macsec_sa_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sa_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+	uint8_t key[16] = { 0 };
+	uint8_t xdgt0;
+	uint8_t xdgt1;
+	int key_len;
+	int i;
+
+	key_len = strlen(res->key) / 2;
+	if (key_len > 16)
+		key_len = 16;
+
+	for (i = 0; i < key_len; i++) {
+		xdgt0 = parse_and_check_key_hexa_digit(res->key, (i * 2));
+		if (xdgt0 == 0xFF)
+			return;
+		xdgt1 = parse_and_check_key_hexa_digit(res->key, (i * 2) + 1);
+		if (xdgt1 == 0xFF)
+			return;
+		key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1);
+	}
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_select_txsa(res->port_id,
+			res->idx, res->an, res->pn, key) :
+		rte_pmd_ixgbe_macsec_select_rxsa(res->port_id,
+			res->idx, res->an, res->pn, key);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		printf("invalid idx %d or an %d\n", res->idx, res->an);
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sa = {
+	.f = cmd_set_macsec_sa_parsed,
+	.data = NULL,
+	.help_str = "set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>",
+	.tokens = {
+		(void *)&cmd_macsec_sa_set,
+		(void *)&cmd_macsec_sa_macsec,
+		(void *)&cmd_macsec_sa_sa,
+		(void *)&cmd_macsec_sa_tx_rx,
+		(void *)&cmd_macsec_sa_port_id,
+		(void *)&cmd_macsec_sa_idx,
+		(void *)&cmd_macsec_sa_an,
+		(void *)&cmd_macsec_sa_pn,
+		(void *)&cmd_macsec_sa_key,
+		NULL,
+	},
+};
 #endif
 
 /* ******************************************************************************** */
@@ -11656,6 +12041,10 @@ cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_split_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_on,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sc,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sa,
 #endif
 	NULL,
 };
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index d361db1..cf7eab1 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -113,6 +113,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index f996039..3a09351 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -113,6 +113,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 22ce2d6..0a9a1af 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -143,6 +143,8 @@ struct fwd_stream {
 #define TESTPMD_TX_OFFLOAD_INSERT_VLAN       0x0040
 /** Insert double VLAN header in forward engine */
 #define TESTPMD_TX_OFFLOAD_INSERT_QINQ       0x0080
+/** Offload MACsec in forward engine */
+#define TESTPMD_TX_OFFLOAD_MACSEC            0x0100
 
 /** Descriptor for a single flow. */
 struct port_flow {
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index e996f35..8b1a2af 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -215,6 +215,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_mbuf_raw_alloc(mbp);
 		if (pkt == NULL) {
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index cacdef1..f4068d3 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -507,6 +507,38 @@ Set mac antispoof for a VF from the PF::
 
    testpmd> set vf mac antispoof  (port_id) (vf_id) (on|off)
 
+set macsec offload
+~~~~~~~~~~~~~~~~~~
+
+Enable/disable MACsec offload::
+
+   testpmd> set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)
+   testpmd> set macsec offload (port_id) off
+
+set macsec sc
+~~~~~~~~~~~~~
+
+Configure MACsec secure connection (SC)::
+
+   testpmd> set macsec sc (tx|rx) (port_id) (mac) (pi)
+
+.. note::
+
+   The pi argument is ignored for tx.
+   Check the NIC Datasheet for hardware limits.
+
+set macsec sa
+~~~~~~~~~~~~~
+
+Configure MACsec secure association (SA)::
+
+   testpmd> set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)
+
+.. note::
+
+   The IDX value must be 0 or 1.
+   Check the NIC Datasheet for hardware limits.
+
 vlan set strip
 ~~~~~~~~~~~~~~
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread
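
Putting the new commands together, a configuration session on a MACsec-capable
port might look roughly as follows. Port 0, the MAC addresses, the PI/PN
values and the 16-byte hex key are examples only, not taken from the patch:

   testpmd> set macsec offload 0 on encrypt on replay-protect on
   testpmd> set macsec sc tx 0 00:11:22:33:44:55 0
   testpmd> set macsec sc rx 0 00:11:22:33:44:66 1
   testpmd> set macsec sa tx 0 0 0 1 000102030405060708090a0b0c0d0e0f
   testpmd> set macsec sa rx 0 0 0 1 000102030405060708090a0b0c0d0e0f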

* [PATCH v3 6/6] doc: add ixgbe specific APIs
  2016-12-25 14:57   ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Tiwei Bie
                       ` (4 preceding siblings ...)
  2016-12-25 14:57     ` [PATCH v3 5/6] app/testpmd: add MACsec offload commands Tiwei Bie
@ 2016-12-25 14:58     ` Tiwei Bie
  2016-12-26 15:15     ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Adrien Mazarguil
  2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
  7 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-25 14:58 UTC (permalink / raw)
  To: dev
  Cc: wenzhuo.lu, wei.dai, xiao.w.wang, olivier.matz, thomas.monjalon,
	konstantin.ananyev, helin.zhang

Add information about the new ixgbe PMD APIs in the release note.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: John McNamara <john.mcnamara@intel.com>
---
 doc/guides/rel_notes/release_17_02.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index d278b3f..867991d 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -52,6 +52,11 @@ New Features
   See the :ref:`Generic flow API <Generic_flow_API>` documentation for more
   information.
 
+* **Added APIs for MACsec offload support to the ixgbe PMD.**
+
+  Six new APIs have been added to the ixgbe PMD for MACsec offload support.
+  The declarations for the APIs can be found in ``rte_pmd_ixgbe.h``.
+
 * **Improved offload support in mbuf.**
 
   Added a new Tx MACsec mbuf flag, set by applications to enable the MACsec
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* Re: [PATCH v3 0/6] Add MACsec offload support for ixgbe
  2016-12-25 14:57   ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Tiwei Bie
                       ` (5 preceding siblings ...)
  2016-12-25 14:58     ` [PATCH v3 6/6] doc: add ixgbe specific APIs Tiwei Bie
@ 2016-12-26 15:15     ` Adrien Mazarguil
  2016-12-27  1:33       ` Tiwei Bie
  2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
  7 siblings, 1 reply; 79+ messages in thread
From: Adrien Mazarguil @ 2016-12-26 15:15 UTC (permalink / raw)
  To: Tiwei Bie
  Cc: dev, wenzhuo.lu, wei.dai, xiao.w.wang, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang

Hi Tiwei,

On Sun, Dec 25, 2016 at 10:57:54PM +0800, Tiwei Bie wrote:
> This patch set adds the MACsec offload support for ixgbe.
> The testpmd is also updated to support MACsec cmds.

I'm not commenting on any specific patch from this series, however I'm
noticing this new trend of working around ethdev to add PMD-specific APIs.
I would like to make sure it's not getting out of hand.

To use the "rte_pmd_ixgbe_macsec_*()" API, applications must be linked with
librte_pmd_ixgbe directly and have the related code under #ifdef
RTE_LIBRTE_IXGBE_PMD like testpmd.

Here we can see this ixgbe-specific API affects rte_mbuf.h and rte_ethdev.h
(new PKT_TX_MACSEC, RTE_ETH_EVENT_MACSEC, DEV_RX_OFFLOAD_MACSEC_STRIP and
DEV_TX_OFFLOAD_MACSEC_INSERT flags).

- Shouldn't these flags have "IXGBE" somewhere in their name and/or be
  defined under #ifdef RTE_LIBRTE_IXGBE_PMD? 

- Why can't the MACsec API be defined globally, for instance won't i40e
  implement it as well someday?

- Why bothering with TX/RX offload capabilities if applications know the
  underlying PMD anyway?

Assuming these patches are kept as-is, I suggest we define a reserved space
documented as such for PMD-specific flags wherever they are used.

> v2:
> - Update the documents for testpmd;
> - Update the release notes;
> - Reuse the functions provided by base code;
> 
> v3:
> - Add the missing parts of MACsec mbuf flag and reorganize the patch set;
> - Add an ethdev event type for MACsec;
> - Advertise the MACsec offload capabilities based on the mac type;
> - Minor fixes and improvements;
> 
> Tiwei Bie (6):
>   mbuf: add flag for MACsec
>   ethdev: add event type for MACsec
>   ethdev: add MACsec offload capability flags
>   net/ixgbe: add MACsec offload support
>   app/testpmd: add MACsec offload commands
>   doc: add ixgbe specific APIs
> 
>  app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++
>  app/test-pmd/macfwd.c                       |   2 +
>  app/test-pmd/macswap.c                      |   2 +
>  app/test-pmd/testpmd.h                      |   2 +
>  app/test-pmd/txonly.c                       |   2 +
>  doc/guides/rel_notes/release_17_02.rst      |  10 +
>  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 ++
>  drivers/net/ixgbe/ixgbe_ethdev.c            | 481 +++++++++++++++++++++++++++-
>  drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
>  drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
>  drivers/net/ixgbe/rte_pmd_ixgbe.h           | 100 ++++++
>  drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
>  lib/librte_ether/rte_ethdev.h               |   3 +
>  lib/librte_mbuf/rte_mbuf.c                  |   2 +
>  lib/librte_mbuf/rte_mbuf.h                  |   6 +
>  15 files changed, 1085 insertions(+), 5 deletions(-)
> 
> -- 
> 2.7.4
> 

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v3 0/6] Add MACsec offload support for ixgbe
  2016-12-26 15:15     ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Adrien Mazarguil
@ 2016-12-27  1:33       ` Tiwei Bie
  2016-12-27 14:37         ` Adrien Mazarguil
  0 siblings, 1 reply; 79+ messages in thread
From: Tiwei Bie @ 2016-12-27  1:33 UTC (permalink / raw)
  To: Adrien Mazarguil
  Cc: dev, wenzhuo.lu, wei.dai, xiao.w.wang, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang

Hi Adrien,

On Mon, Dec 26, 2016 at 04:15:37PM +0100, Adrien Mazarguil wrote:
> Hi Tiwei,
> 
> On Sun, Dec 25, 2016 at 10:57:54PM +0800, Tiwei Bie wrote:
> > This patch set adds the MACsec offload support for ixgbe.
> > The testpmd is also updated to support MACsec cmds.
> 
> I'm not commenting on any specific patch from this series, however I'm
> noticing this new trend of working around ethdev to add PMD-specific APIs.
> I would like to make sure it's not getting out of hand.
> 
> To use the "rte_pmd_ixgbe_macsec_*()" API, applications must be linked with
> librte_pmd_ixgbe directly and have the related code under #ifdef
> RTE_LIBRTE_IXGBE_PMD like testpmd.
> 
> Here we can see this ixgbe-specific API affects rte_mbuf.h and rte_ethdev.h
> (new PKT_TX_MACSEC, RTE_ETH_EVENT_MACSEC, DEV_RX_OFFLOAD_MACSEC_STRIP and
> DEV_TX_OFFLOAD_MACSEC_INSERT flags).
> 
> - Shouldn't these flags have "IXGBE" somewhere in their name and/or be
>   defined under #ifdef RTE_LIBRTE_IXGBE_PMD? 
> 

I think those flags are very generic, simple and innocent, so we could just
define them in the normal way. And currently it seems that we lack a way to
define the PMD-specific flags for mbuf, ethdev, etc.

> - Why can't the MACsec API be defined globally, for instance won't i40e
>   implement it as well someday?
> 

I think currently we prefer to implement some PMD features based on the
PMD-specific APIs at first to avoid the bloating of the ethdev APIs. And
when it proves to be generic (which means many people really need it and
care about it, and more drivers are really going to implement it), then
we make it as the global ethdev API.

> - Why bothering with TX/RX offload capabilities if applications know the
>   underlying PMD anyway?
> 

Not every NIC supported by IXGBE supports the MACsec offload. So even
if the application knows it's using IXGBE PMD, it still needs to check
whether the MACsec offload capabilities are supported.

> Assuming these patches are kept as-is, I suggest we define a reserved space
> documented as such for PMD-specific flags wherever they are used.
> 

It sounds good to me. But it may involve many pieces of the lib, such
as the mbuf ol_flags, ethdev event type, ethdev offload capabilities,
and so on. So maybe it can be done as another work.

Thank you very much for your comments! :-)

Best regards,
Tiwei Bie

> > v2:
> > - Update the documents for testpmd;
> > - Update the release notes;
> > - Reuse the functions provided by base code;
> > 
> > v3:
> > - Add the missing parts of MACsec mbuf flag and reorganize the patch set;
> > - Add an ethdev event type for MACsec;
> > - Advertise the MACsec offload capabilities based on the mac type;
> > - Minor fixes and improvements;
> > 
> > Tiwei Bie (6):
> >   mbuf: add flag for MACsec
> >   ethdev: add event type for MACsec
> >   ethdev: add MACsec offload capability flags
> >   net/ixgbe: add MACsec offload support
> >   app/testpmd: add MACsec offload commands
> >   doc: add ixgbe specific APIs
> > 
> >  app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++
> >  app/test-pmd/macfwd.c                       |   2 +
> >  app/test-pmd/macswap.c                      |   2 +
> >  app/test-pmd/testpmd.h                      |   2 +
> >  app/test-pmd/txonly.c                       |   2 +
> >  doc/guides/rel_notes/release_17_02.rst      |  10 +
> >  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 ++
> >  drivers/net/ixgbe/ixgbe_ethdev.c            | 481 +++++++++++++++++++++++++++-
> >  drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
> >  drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
> >  drivers/net/ixgbe/rte_pmd_ixgbe.h           | 100 ++++++
> >  drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
> >  lib/librte_ether/rte_ethdev.h               |   3 +
> >  lib/librte_mbuf/rte_mbuf.c                  |   2 +
> >  lib/librte_mbuf/rte_mbuf.h                  |   6 +
> >  15 files changed, 1085 insertions(+), 5 deletions(-)
> > 
> > -- 
> > 2.7.4
> > 
> 
> -- 
> Adrien Mazarguil
> 6WIND

^ permalink raw reply	[flat|nested] 79+ messages in thread
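
For reference, the capability check described above could look like the
following sketch in application code (the flag names come from the earlier
patches in this series; the helper name and the policy of requiring both
directions are assumptions made for illustration):

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_pmd_ixgbe.h>

/* Only call the PMD-specific API when the port advertises MACsec. */
static int
try_enable_macsec(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) ||
	    !(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_MACSEC_STRIP))
		return -ENOTSUP;

	return rte_pmd_ixgbe_macsec_enable(port_id, 1, 1);
}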

* Re: [PATCH v3 0/6] Add MACsec offload support for ixgbe
  2016-12-27  1:33       ` Tiwei Bie
@ 2016-12-27 14:37         ` Adrien Mazarguil
  2016-12-27 17:42           ` Tiwei Bie
  0 siblings, 1 reply; 79+ messages in thread
From: Adrien Mazarguil @ 2016-12-27 14:37 UTC (permalink / raw)
  To: Tiwei Bie
  Cc: dev, wenzhuo.lu, wei.dai, xiao.w.wang, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang

Hi Tiwei,

On Tue, Dec 27, 2016 at 09:33:31AM +0800, Tiwei Bie wrote:
> Hi Adrien,
> 
> On Mon, Dec 26, 2016 at 04:15:37PM +0100, Adrien Mazarguil wrote:
> > Hi Tiwei,
> > 
> > On Sun, Dec 25, 2016 at 10:57:54PM +0800, Tiwei Bie wrote:
> > > This patch set adds the MACsec offload support for ixgbe.
> > > The testpmd is also updated to support MACsec cmds.
> > 
> > I'm not commenting on any specific patch from this series, however I'm
> > noticing this new trend of working around ethdev to add PMD-specific APIs.
> > I would like to make sure it's not getting out of hand.
> > 
> > To use the "rte_pmd_ixgbe_macsec_*()" API, applications must be linked with
> > librte_pmd_ixgbe directly and have the related code under #ifdef
> > RTE_LIBRTE_IXGBE_PMD like testpmd.
> > 
> > Here we can see this ixgbe-specific API affects rte_mbuf.h and rte_ethdev.h
> > (new PKT_TX_MACSEC, RTE_ETH_EVENT_MACSEC, DEV_RX_OFFLOAD_MACSEC_STRIP and
> > DEV_TX_OFFLOAD_MACSEC_INSERT flags).
> > 
> > - Shouldn't these flags have "IXGBE" somewhere in their name and/or be
> >   defined under #ifdef RTE_LIBRTE_IXGBE_PMD? 
> > 
> 
> I think those flags are very generic, simple and innocent, so we could just
> define them in the normal way. And currently it seems that we lack a way to
> define the PMD-specific flags for mbuf, ethdev, etc.

Yes, this is probably the least intrusive approach. I'm just pointing out
that in the meantime, assigned values are not available for other generic
uses which is a problem unless there is a plan to make this API available
globally.

Missing flags and bits could also be defined directly by rte_pmd_ixgbe.h.

> > - Why can't the MACsec API be defined globally, for instance won't i40e
> >   implement it as well someday?
> 
> I think currently we prefer to implement some PMD features based on the
> PMD-specific APIs at first to avoid the bloating of the ethdev APIs. And
> when it proves to be generic (which means many people really need it and
> care about it, and more drivers are really going to implement it), then
> we make it as the global ethdev API.

Understandable, but then unless the global API remains exactly the same
including the "rte_ixgbe_macsec" prefix (which I think won't happen),
applications need to be rewritten, it's not convenient, and as a result may
prevent adoption due to the following cycle:

- Applications wait for the API to evolve before using MACsec.
- DPDK waits for applications to use the ixgbe MACsec API to improve it.

In the end, flags tied to a single PMD remain allocated in the global
namespace while the API is kept in this temporary state forever.

I think doing the extra work to make it global from the start is worth the
trouble. This step could perhaps be made easier if people agreed that
struct eth_dev_ops (and friends) must be opaque to applications.

> > - Why bothering with TX/RX offload capabilities if applications know the
> >   underlying PMD anyway?
> > 
> 
> Not every NIC supported by IXGBE supports the MACsec offload. So even
> if the application knows it's using IXGBE PMD, it still needs to check
> whether the MACsec offload capabilities are supported.

OK, I assumed they all did.

> > Assuming these patches are kept as-is, I suggest we define a reserved space
> > documented as such for PMD-specific flags wherever they are used.
> > 
> 
> It sounds good to me. But it may involve many pieces of the lib, such
> as the mbuf ol_flags, ethdev event type, ethdev offload capabilities,
> and so on. So maybe it can be done as another work.

Right, that was just an idea from the top of my head, we could define
reserved values only when they become necessary for a PMD as in this case,
e.g. instead of defining PKT_TX_MACSEC, one would define PKT_TX_RESERVED_0
which would be interpreted as MACSEC by ixgbe until this API is exposed
globally.

Other PMDs could implement other unrelated PMD-specific APIs through
RESERVED_0 in the meantime, so we preserve the precious space we have
for global APIs (especially inside mbufs).

Thoughts?

Best regards,

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v3 0/6] Add MACsec offload support for ixgbe
  2016-12-27 14:37         ` Adrien Mazarguil
@ 2016-12-27 17:42           ` Tiwei Bie
  0 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-27 17:42 UTC (permalink / raw)
  To: Adrien Mazarguil
  Cc: dev, wenzhuo.lu, wei.dai, xiao.w.wang, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang

Hi Adrien,

On Tue, Dec 27, 2016 at 03:37:46PM +0100, Adrien Mazarguil wrote:
> Hi Tiwei,
> 
> On Tue, Dec 27, 2016 at 09:33:31AM +0800, Tiwei Bie wrote:
> > Hi Adrien,
> > 
> > On Mon, Dec 26, 2016 at 04:15:37PM +0100, Adrien Mazarguil wrote:
> > > Hi Tiwei,
> > > 
> > > On Sun, Dec 25, 2016 at 10:57:54PM +0800, Tiwei Bie wrote:
> > > > This patch set adds the MACsec offload support for ixgbe.
> > > > The testpmd is also updated to support MACsec cmds.
[...]
> > > - Why can't the MACsec API be defined globally, for instance won't i40e
> > >   implement it as well someday?
> > 
> > I think currently we prefer to implement some PMD features based on the
> > PMD-specific APIs at first to avoid the bloating of the ethdev APIs. And
> > when it proves to be generic (which means many people really need it and
> > care about it, and more drivers are really going to implement it), then
> > we make it as the global ethdev API.
> 
> Understandable, but then unless the global API remains exactly the same
> including the "rte_ixgbe_macsec" prefix (which I think won't happen),
> applications need to be rewritten, it's not convenient, and as a result may
> prevent adoption due to the following cycle:
> 
> - Applications wait for the API to evolve before using MACsec.
> - DPDK waits for applications to use the ixgbe MACsec API to improve it.
> 

Yeah, I also saw these problems. And I also don't think this is a
perfect way. But it seems that many of us prefer this way, so I just
choose to accept it.

> In the end, flags tied to a single PMD remain allocated in the global
> namespace while the API is kept in this temporary state forever.
> 
> I think doing the extra work to make it global from the start is worth the
> trouble. This step could perhaps be made easier if people agreed that
> struct eth_dev_ops (and friends) must be opaque to applications.
> 
[...]
> 
> > > Assuming these patches are kept as-is, I suggest we define a reserved space
> > > documented as such for PMD-specific flags wherever they are used.
> > > 
> > 
> > It sounds good to me. But it may involve many pieces of the lib, such
> > as the mbuf ol_flags, ethdev event type, ethdev offload capabilities,
> > and so on. So maybe it can be done as another work.
> 
> Right, that was just an idea from the top of my head, we could define
> reserved values only when they become necessary for a PMD as in this case,
> e.g. instead of defining PKT_TX_MACSEC, one would define PKT_TX_RESERVED_0
> which would be interpreted as MACSEC by ixgbe until this API is exposed
> globally.
> 
> Other PMDs could implement other unrelated PMD-specific APIs through
> RESERVED_0 in the meantime, so we preserve the precious space we have
> for global APIs (especially inside mbufs).
> 
> Thoughts?
> 

When a certain PMD implements several different features based on
PMD-specific APIs, and many of them need to define some flags in
mbuf or the like, it still needs to waste many bits (i.e. we'll need
to reserve many bits by that time). But anyway, I love your idea!
It's much better than the current approach! I'll update my patch.
Thank you very much! ;-)

Best regards,
Tiwei Bie

^ permalink raw reply	[flat|nested] 79+ messages in thread
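
A rough sketch of the reserved-bit approach agreed on above is shown below.
The PKT_TX_IXGBE_MACSEC name is only illustrative; the v4 series that follows
shows what was actually submitted:

/* lib/librte_mbuf/rte_mbuf.h: the generic layer only hands out an
 * anonymous reserved bit, with no MACsec semantics attached. */
#define PKT_TX_RESERVED_0	(0x1ULL << 44)

/* drivers/net/ixgbe/rte_pmd_ixgbe.h: the PMD-specific header gives the
 * reserved bit a driver-scoped meaning. */
#define PKT_TX_IXGBE_MACSEC	PKT_TX_RESERVED_0

/* Application code stays guarded, as testpmd is in this series. */
#ifdef RTE_LIBRTE_IXGBE_PMD
	mbuf->ol_flags |= PKT_TX_IXGBE_MACSEC;
#endif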

* [PATCH v4 0/7] Add MACsec offload support for ixgbe
  2016-12-25 14:57   ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Tiwei Bie
                       ` (6 preceding siblings ...)
  2016-12-26 15:15     ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Adrien Mazarguil
@ 2016-12-28 15:41     ` Tiwei Bie
  2016-12-28 15:41       ` [PATCH v4 1/7] mbuf: reserve a Tx offload flag for PMD-specific API Tiwei Bie
                         ` (8 more replies)
  7 siblings, 9 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-28 15:41 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

This patch set adds the MACsec offload support for ixgbe.
The testpmd is also updated to support MACsec cmds.

v2:
- Update the documents for testpmd;
- Update the release notes;
- Reuse the functions provided by base code;

v3:
- Add the missing parts of MACsec mbuf flag and reorganize the patch set;
- Add an ethdev event type for MACsec;
- Advertise the MACsec offload capabilities based on the mac type;
- Minor fixes and improvements;

v4:
- Reserve bits in mbuf and ethdev for PMD specific API;
- Use the reserved bits in PMD specific API;

Tiwei Bie (7):
  mbuf: reserve a Tx offload flag for PMD-specific API
  ethdev: reserve an event type for PMD-specific API
  ethdev: reserve capability flags for PMD-specific API
  net/ixgbe: add MACsec offload support
  app/testpmd: add MACsec offload commands
  doc: add ixgbe specific APIs
  doc: update the release notes for the reserved flags

 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   7 +
 app/test-pmd/macswap.c                      |   7 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   7 +
 doc/guides/rel_notes/release_17_02.rst      |  18 ++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 ++
 drivers/net/ixgbe/ixgbe_ethdev.c            | 481 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   5 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 122 +++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 lib/librte_ether/rte_ethdev.h               |   4 +
 lib/librte_mbuf/rte_mbuf.c                  |   2 +
 lib/librte_mbuf/rte_mbuf.h                  |   5 +
 15 files changed, 1132 insertions(+), 5 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 79+ messages in thread

* [PATCH v4 1/7] mbuf: reserve a Tx offload flag for PMD-specific API
  2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
@ 2016-12-28 15:41       ` Tiwei Bie
  2016-12-28 15:41       ` [PATCH v4 2/7] ethdev: reserve an event type " Tiwei Bie
                         ` (7 subsequent siblings)
  8 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-28 15:41 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Reserve a Tx offload flag in the mbuf that can be used by a PMD to
define its own Tx offload flag when implementing a PMD-specific API.
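
As a minimal sketch (not part of this patch), a PMD-specific header can
simply alias the reserved bit under its own name, which is what the ixgbe
patches later in this series do; the helper function below is illustrative
only:

    #include <rte_mbuf.h>

    /* Interpret the reserved mbuf flag as the PMD's own MACsec flag. */
    #define PKT_TX_IXGBE_MACSEC    PKT_TX_RESERVED_0

    /* Illustrative helper: request MACsec insertion for one packet. */
    static inline void
    request_macsec_insertion(struct rte_mbuf *m)
    {
            m->ol_flags |= PKT_TX_IXGBE_MACSEC;
    }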

Suggested-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 lib/librte_mbuf/rte_mbuf.c | 2 ++
 lib/librte_mbuf/rte_mbuf.h | 5 +++++
 2 files changed, 7 insertions(+)

diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index 63f43c8..15c4f68 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -404,6 +404,7 @@ const char *rte_get_tx_ol_flag_name(uint64_t mask)
 	case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
 	case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
 	case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
+	case PKT_TX_RESERVED_0: return "PKT_TX_RESERVED_0";
 	default: return NULL;
 	}
 }
@@ -434,6 +435,7 @@ rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
 		  "PKT_TX_TUNNEL_NONE" },
 		{ PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK,
 		  "PKT_TX_TUNNEL_NONE" },
+		{ PKT_TX_RESERVED_0, PKT_TX_RESERVED_0, NULL },
 	};
 	const char *name;
 	unsigned int i;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index ead7c6e..6168a6d 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -182,6 +182,11 @@ extern "C" {
 /* add new TX flags here */
 
 /**
+ * Reserved Tx offload flag for PMD-specific API.
+ */
+#define PKT_TX_RESERVED_0     (0x1ULL << 44)
+
+/**
  * Bits 45:48 used for the tunnel type.
  * When doing Tx offload like TSO or checksum, the HW needs to configure the
  * tunnel type into the HW descriptors.
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v4 2/7] ethdev: reserve an event type for PMD-specific API
  2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
  2016-12-28 15:41       ` [PATCH v4 1/7] mbuf: reserve a Tx offload flag for PMD-specific API Tiwei Bie
@ 2016-12-28 15:41       ` Tiwei Bie
  2016-12-28 15:41       ` [PATCH v4 3/7] ethdev: reserve capability flags " Tiwei Bie
                         ` (6 subsequent siblings)
  8 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-28 15:41 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Reserve an event type that can be used by a PMD to define its own
event type when implementing a PMD-specific API.
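
As a sketch only (not part of this patch), an application subscribes to the
PMD-specific event exactly as it would to a generic one. The name
RTE_ETH_EVENT_IXGBE_MACSEC is the alias the ixgbe patch later in this
series maps onto the reserved value; the callback prototype follows the
rte_ethdev.h of this release:

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_pmd_ixgbe.h>  /* RTE_ETH_EVENT_IXGBE_MACSEC */

    /* Called from the interrupt path when the PMD raises the event. */
    static void
    macsec_event_cb(uint8_t port_id, enum rte_eth_event_type event, void *arg)
    {
            (void)arg;
            if (event == RTE_ETH_EVENT_IXGBE_MACSEC)
                    printf("port %d: MACsec PN threshold reached\n", port_id);
    }

    /* Illustrative helper: register the callback on one port. */
    static int
    register_macsec_event(uint8_t port_id)
    {
            return rte_eth_dev_callback_register(port_id,
                            RTE_ETH_EVENT_IXGBE_MACSEC, macsec_event_cb, NULL);
    }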

Suggested-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index fb51754..d465825 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -3044,6 +3044,8 @@ enum rte_eth_event_type {
 	RTE_ETH_EVENT_INTR_RESET,
 			/**< reset interrupt event, sent to VF on PF reset */
 	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
+	RTE_ETH_EVENT_RESERVED_0,
+			/**< reserved event type for PMD-specific API */
 	RTE_ETH_EVENT_MAX       /**< max value of this enum */
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v4 3/7] ethdev: reserve capability flags for PMD-specific API
  2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
  2016-12-28 15:41       ` [PATCH v4 1/7] mbuf: reserve a Tx offload flag for PMD-specific API Tiwei Bie
  2016-12-28 15:41       ` [PATCH v4 2/7] ethdev: reserve an event type " Tiwei Bie
@ 2016-12-28 15:41       ` Tiwei Bie
  2016-12-28 15:41       ` [PATCH v4 4/7] net/ixgbe: add MACsec offload support Tiwei Bie
                         ` (5 subsequent siblings)
  8 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-28 15:41 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Reserve a Tx capability flag and an Rx capability flag that can be
used by a PMD to define its own capability flags when implementing a
PMD-specific API.
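
A minimal sketch (not part of this patch) of how an application could test
for the reserved capability once a PMD has given it a meaning, here using
the ixgbe alias introduced later in this series:

    #include <rte_ethdev.h>
    #include <rte_pmd_ixgbe.h>  /* DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT */

    /* Illustrative helper: does this port advertise MACsec insertion? */
    static int
    port_supports_macsec_insert(uint8_t port_id)
    {
            struct rte_eth_dev_info dev_info;

            rte_eth_dev_info_get(port_id, &dev_info);
            return (dev_info.tx_offload_capa &
                    DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT) != 0;
    }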

Suggested-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index d465825..8800b39 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -857,6 +857,7 @@ struct rte_eth_conf {
 #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
 #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_RESERVED_0  0x00000080 /**< Used for PMD-specific API. */
 
 /**
  * TX offload capabilities of a device.
@@ -874,6 +875,7 @@ struct rte_eth_conf {
 #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_RESERVED_0  0x00002000 /**< Used for PMD-specific API. */
 
 /**
  * Ethernet device information
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v4 4/7] net/ixgbe: add MACsec offload support
  2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
                         ` (2 preceding siblings ...)
  2016-12-28 15:41       ` [PATCH v4 3/7] ethdev: reserve capability flags " Tiwei Bie
@ 2016-12-28 15:41       ` Tiwei Bie
  2016-12-28 15:41       ` [PATCH v4 5/7] app/testpmd: add MACsec offload commands Tiwei Bie
                         ` (4 subsequent siblings)
  8 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-28 15:41 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

MACsec (or LinkSec), defined in IEEE 802.1AE, is a MAC-level
encryption/authentication scheme that uses symmetric cryptography.
This commit adds MACsec offload support for ixgbe.
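
For reference, a typical configuration sequence with the new APIs declared
in rte_pmd_ixgbe.h looks as follows; this is only a sketch, and the MAC
addresses, PI, PN and key below are made-up example values:

    #include <rte_pmd_ixgbe.h>

    static int
    setup_macsec(uint8_t port)
    {
            uint8_t local_mac[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
            uint8_t remote_mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
            uint8_t key[16] = { 0 };  /* dummy 128-bit SAK for the example */
            int ret;

            /* Enable the offload with encryption and replay protection. */
            ret = rte_pmd_ixgbe_macsec_enable(port, 1, 1);
            if (ret != 0)
                    return ret;

            /* Program the Tx and Rx secure connections. */
            rte_pmd_ixgbe_macsec_config_txsc(port, local_mac);
            rte_pmd_ixgbe_macsec_config_rxsc(port, remote_mac, 1);

            /* Enable SA 0 with association number 0 and packet number 1. */
            rte_pmd_ixgbe_macsec_select_txsa(port, 0, 0, 1, key);
            rte_pmd_ixgbe_macsec_select_rxsa(port, 0, 0, 1, key);

            return 0;
    }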

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c            | 481 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   5 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 122 +++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 5 files changed, 659 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ec2edad..653ddd2 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -231,6 +231,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 			uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
@@ -745,6 +746,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
 			   sizeof(rte_ixgbe_stats_strings[0]))
 
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_untagged)},
+	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_encrypted)},
+	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_protected)},
+	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_octets_encrypted)},
+	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+		out_octets_protected)},
+	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_untagged)},
+	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_badtag)},
+	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_nosci)},
+	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unknownsci)},
+	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+		in_octets_decrypted)},
+	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+		in_octets_validated)},
+	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unchecked)},
+	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_delayed)},
+	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_late)},
+	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_ok)},
+	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_invalid)},
+	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notvalid)},
+	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unusedsa)},
+	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+			   sizeof(rte_ixgbe_macsec_strings[0]))
+
 /* Per-queue statistics */
 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
 	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
@@ -2367,6 +2413,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		/* check if lsc interrupt is enabled */
 		if (dev->data->dev_conf.intr_conf.lsc != 0)
 			ixgbe_dev_lsc_interrupt_setup(dev);
+		ixgbe_dev_macsec_interrupt_setup(dev);
 	} else {
 		rte_intr_callback_unregister(intr_handle,
 					     ixgbe_dev_interrupt_handler, dev);
@@ -2557,6 +2604,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 static void
 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 			   struct ixgbe_hw_stats *hw_stats,
+			   struct ixgbe_macsec_stats *macsec_stats,
 			   uint64_t *total_missed_rx, uint64_t *total_qbrc,
 			   uint64_t *total_qprc, uint64_t *total_qprdc)
 {
@@ -2726,6 +2774,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 	/* Flow Director Stats registers */
 	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
 	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+
+	/* MACsec Stats registers */
+	macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+	macsec_stats->out_pkts_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+	macsec_stats->out_pkts_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+	macsec_stats->out_octets_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+	macsec_stats->out_octets_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+	macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+	macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+	macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+	macsec_stats->in_pkts_unknownsci +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+	macsec_stats->in_octets_decrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+	macsec_stats->in_octets_validated +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+	macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+	macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+	macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+	for (i = 0; i < 2; i++) {
+		macsec_stats->in_pkts_ok +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+		macsec_stats->in_pkts_invalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+		macsec_stats->in_pkts_notvalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+	}
+	macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+	macsec_stats->in_pkts_notusingsa +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
 }
 
 /*
@@ -2738,6 +2820,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i;
 
@@ -2746,8 +2831,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-			&total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	if (stats == NULL)
 		return;
@@ -2799,7 +2884,7 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 /* This function calculates the number of xstats based on the current config */
 static unsigned
 ixgbe_xstats_calc_num(void) {
-	return IXGBE_NB_HW_STATS +
+	return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
 		(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
 		(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
 }
@@ -2826,6 +2911,15 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 			count++;
 		}
 
+		/* MACsec Stats */
+		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"%s",
+				rte_ixgbe_macsec_strings[i].name);
+			count++;
+		}
+
 		/* RX Priority Stats */
 		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2875,6 +2969,9 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i, stat, count = 0;
 
@@ -2888,8 +2985,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-				   &total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	/* If this is a reset xstats is NULL, and we have cleared the
 	 * registers by reading them.
@@ -2905,6 +3002,13 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		count++;
 	}
 
+	/* MACsec Stats */
+	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+		xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
+				rte_ixgbe_macsec_strings[i].offset);
+		count++;
+	}
+
 	/* RX Priority Stats */
 	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2932,6 +3036,9 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw_stats *stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 
 	unsigned count = ixgbe_xstats_calc_num();
 
@@ -2940,6 +3047,7 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 
 	/* Reset software totals */
 	memset(stats, 0, sizeof(*stats));
+	memset(macsec_stats, 0, sizeof(*macsec_stats));
 }
 
 static void
@@ -3072,6 +3180,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	    !RTE_ETH_DEV_SRIOV(dev).active)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
 
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IXGBE_MACSEC_STRIP;
+
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3085,6 +3197,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_TX_OFFLOAD_SCTP_CKSUM  |
 		DEV_TX_OFFLOAD_TCP_TSO;
 
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT;
+
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3382,6 +3498,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	intr->mask |= IXGBE_EICR_LINKSEC;
+
+	return 0;
+}
+
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
  *
@@ -3416,6 +3554,9 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 	if (eicr & IXGBE_EICR_MAILBOX)
 		intr->flags |= IXGBE_FLAG_MAILBOX;
 
+	if (eicr & IXGBE_EICR_LINKSEC)
+		intr->flags |= IXGBE_FLAG_MACSEC;
+
 	if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
 	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
 	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
@@ -3570,6 +3711,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	}
 
+	if (intr->flags & IXGBE_FLAG_MACSEC) {
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_IXGBE_MACSEC,
+					      NULL);
+		intr->flags &= ~IXGBE_FLAG_MACSEC;
+	}
+
 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
 	ixgbe_enable_intr(dev);
 	rte_intr_enable(intr_handle);
@@ -7610,6 +7757,330 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	ixgbevf_dev_interrupt_action(dev);
 }
 
+/**
+ *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the transmit data path and waits for the HW to internally empty
+ *  the Tx security block
+ **/
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECTX_POLL 40
+
+	int i;
+	int sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
+		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
+			break;
+		/* Use interrupt-safe sleep just in case */
+		usec_delay(1000);
+	}
+
+	/* For informational purposes only */
+	if (i >= IXGBE_MAX_SECTX_POLL)
+		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+			 "path fully disabled.  Continuing with init.\n");
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the transmit data path.
+ **/
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+	uint32_t sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * As no ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Enable Ethernet CRC (required by MACsec offload) */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+	/* Enable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+	ctrl |= 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+	/* Enable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+		     IXGBE_LSECTXCTRL_AUTH;
+	ctrl |= IXGBE_LSECTXCTRL_AISCI;
+	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+	if (rp)
+		ctrl |= IXGBE_LSECRXCTRL_RP;
+	else
+		ctrl &= ~IXGBE_LSECRXCTRL_RP;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * As no ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * As no ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Disable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Disable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * As no ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+	ctrl = mac[4] | (mac[5] << 8);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+	pi = rte_cpu_to_be_16(pi);
+	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Set the PN and key */
+	pn = rte_cpu_to_be_32(pn);
+	if (idx == 0) {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+		}
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+		}
+	}
+
+	/* Set AN and select the SA */
+	ctrl = (an << idx * 2) | (idx << 4);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN */
+	pn = rte_cpu_to_be_32(pn);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+	/* Set the key */
+	for (i = 0; i < 4; i++) {
+		ctrl = (key[i * 4 + 0] <<  0) |
+		       (key[i * 4 + 1] <<  8) |
+		       (key[i * 4 + 2] << 16) |
+		       (key[i * 4 + 3] << 24);
+		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+	}
+
+	/* Set the AN and validate the SA */
+	ctrl = an | (1 << 2);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+	return 0;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 80350c2..ffced1c 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -43,6 +43,7 @@
 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
 #define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
 #define IXGBE_FLAG_PHY_INTERRUPT    (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
 
 /*
  * Defines that were not part of ixgbe_type.h as they are not used by the
@@ -130,6 +131,10 @@
 #define IXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define IXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
 
+#define IXGBE_SECTX_MINSECIFG_MASK      0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH            0xFFFFFE00
+
 /*
  * Information about the fdir mode.
  */
@@ -265,11 +270,44 @@ struct ixgbe_filter_info {
 };
 
 /*
+ * Statistics counters collected by the MACsec
+ */
+struct ixgbe_macsec_stats {
+	/* TX port statistics */
+	uint64_t out_pkts_untagged;
+	uint64_t out_pkts_encrypted;
+	uint64_t out_pkts_protected;
+	uint64_t out_octets_encrypted;
+	uint64_t out_octets_protected;
+
+	/* RX port statistics */
+	uint64_t in_pkts_untagged;
+	uint64_t in_pkts_badtag;
+	uint64_t in_pkts_nosci;
+	uint64_t in_pkts_unknownsci;
+	uint64_t in_octets_decrypted;
+	uint64_t in_octets_validated;
+
+	/* RX SC statistics */
+	uint64_t in_pkts_unchecked;
+	uint64_t in_pkts_delayed;
+	uint64_t in_pkts_late;
+
+	/* RX SA statistics */
+	uint64_t in_pkts_ok;
+	uint64_t in_pkts_invalid;
+	uint64_t in_pkts_notvalid;
+	uint64_t in_pkts_unusedsa;
+	uint64_t in_pkts_notusingsa;
+};
+
+/*
  * Structure to store private data for each driver instance (for each port).
  */
 struct ixgbe_adapter {
 	struct ixgbe_hw             hw;
 	struct ixgbe_hw_stats       stats;
+	struct ixgbe_macsec_stats   macsec_stats;
 	struct ixgbe_hw_fdir_info   fdir;
 	struct ixgbe_interrupt      intr;
 	struct ixgbe_stat_mapping_registers stat_mappings;
@@ -300,6 +338,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->stats)
 
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
 #define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->intr)
 
@@ -445,4 +486,8 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 
 int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op, void *arg);
+
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
 #endif /* _IXGBE_ETHDEV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index b2d9f45..db9fc18 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -79,12 +79,15 @@
 #include "base/ixgbe_common.h"
 #include "ixgbe_rxtx.h"
 
+#include "rte_pmd_ixgbe.h"
+
 /* Bit Mask to indicate what bits required for building TX context */
 #define IXGBE_TX_OFFLOAD_MASK (			 \
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
 		PKT_TX_TCP_SEG |		 \
+		PKT_TX_IXGBE_MACSEC |		 \
 		PKT_TX_OUTER_IP_CKSUM)
 
 #if 1
@@ -519,6 +522,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 		cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
 	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
 		cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+	if (ol_flags & PKT_TX_IXGBE_MACSEC)
+		cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
 	return cmdtype;
 }
 
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index c2fb826..13b6a44 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -42,6 +42,28 @@
 #include <rte_ethdev.h>
 
 /**
+ * If these flags are advertised by the PMD, the NIC supports the MACsec
+ * offload. The incoming MACsec traffic can be offloaded transparently
+ * after the MACsec offload is configured correctly by the application.
+ * The application can then set the PKT_TX_IXGBE_MACSEC flag in mbufs to
+ * enable the MACsec offload for the packets to be transmitted.
+ */
+#define DEV_RX_OFFLOAD_IXGBE_MACSEC_STRIP	DEV_RX_OFFLOAD_RESERVED_0
+#define DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT	DEV_TX_OFFLOAD_RESERVED_0
+
+/**
+ * This event will occur when the PN counter in a MACsec connection
+ * reaches the exhaustion threshold.
+ */
+#define RTE_ETH_EVENT_IXGBE_MACSEC		RTE_ETH_EVENT_RESERVED_0
+
+/**
+ * Offload the MACsec. This flag must be set by the application in mbuf
+ * to enable this offload feature for a packet to be transmitted.
+ */
+#define PKT_TX_IXGBE_MACSEC			PKT_TX_RESERVED_0
+
+/**
  * Set the VF MAC address.
  *
  * @param port
@@ -183,6 +205,106 @@ int
 rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
 
 /**
+ * Enable MACsec offloading.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param en
+ *    1 - Enable encryption (encrypt and add integrity signature).
+ *    0 - Disable encryption (only add integrity signature).
+ * @param rp
+ *    1 - Enable replay protection.
+ *    0 - Disable replay protection.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offloading.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+
+/**
+ * Configure TX SC (Secure Connection)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+
+/**
+ * Configure RX SC (Secure Connection)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the remote side.
+ * @param pi
+ *   The PI (port identifier) on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable TX SA (Secure Association)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1).
+ * @param an
+ *   The association number on the local side.
+ * @param pn
+ *   The packet number on the local side.
+ * @param key
+ *   The key on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
+ * Enable RX SA (Secure Association)
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1)
+ * @param an
+ *   The association number on the remote side.
+ * @param pn
+ *   The packet number on the remote side.
+ * @param key
+ *   The key on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
  * Response sent back to ixgbe driver from user app after callback
  */
 enum rte_pmd_ixgbe_mb_event_rsp {
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 92434f3..6d68934 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -15,3 +15,14 @@ DPDK_16.11 {
 	rte_pmd_ixgbe_set_vf_vlan_insert;
 	rte_pmd_ixgbe_set_vf_vlan_stripq;
 } DPDK_2.0;
+
+DPDK_17.02 {
+	global:
+
+	rte_pmd_ixgbe_macsec_enable;
+	rte_pmd_ixgbe_macsec_disable;
+	rte_pmd_ixgbe_macsec_config_txsc;
+	rte_pmd_ixgbe_macsec_config_rxsc;
+	rte_pmd_ixgbe_macsec_select_txsa;
+	rte_pmd_ixgbe_macsec_select_rxsa;
+} DPDK_16.11;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v4 5/7] app/testpmd: add MACsec offload commands
  2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
                         ` (3 preceding siblings ...)
  2016-12-28 15:41       ` [PATCH v4 4/7] net/ixgbe: add MACsec offload support Tiwei Bie
@ 2016-12-28 15:41       ` Tiwei Bie
  2016-12-28 15:41       ` [PATCH v4 6/7] doc: add ixgbe specific APIs Tiwei Bie
                         ` (3 subsequent siblings)
  8 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-28 15:41 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

The following MACsec offload commands are added:

- set macsec offload <port_id> on encrypt on|off replay-protect on|off
- set macsec offload <port_id> off
- set macsec sc tx|rx <port_id> <mac> <pi>
- set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>

Also update the testpmd user guide.
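
An illustrative session (the port id, MAC addresses, PI, PN and key are
example values only):

    testpmd> set macsec offload 0 on encrypt on replay-protect on
    testpmd> set macsec sc tx 0 02:00:00:00:00:01 0
    testpmd> set macsec sc rx 0 02:00:00:00:00:02 1
    testpmd> set macsec sa tx 0 0 0 1 00112233445566778899aabbccddeeff
    testpmd> set macsec sa rx 0 0 0 1 00112233445566778899aabbccddeeff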

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   7 +
 app/test-pmd/macswap.c                      |   7 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   7 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 +++
 6 files changed, 444 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index f768b6b..4a67894 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -275,6 +275,18 @@ static void cmd_help_long_parsed(void *parsed_result,
 
 			"set vf mac antispoof (port_id) (vf_id) (on|off).\n"
 			"    Set MAC antispoof for a VF from the PF.\n\n"
+
+			"set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)\n"
+			"    Enable MACsec offload.\n\n"
+
+			"set macsec offload (port_id) off\n"
+			"    Disable MACsec offload.\n\n"
+
+			"set macsec sc (tx|rx) (port_id) (mac) (pi)\n"
+			"    Configure MACsec secure connection (SC).\n\n"
+
+			"set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)\n"
+			"    Configure MACsec secure association (SA).\n\n"
 #endif
 
 			"vlan set strip (on|off) (port_id)\n"
@@ -11488,6 +11500,379 @@ cmdline_parse_inst_t cmd_set_vf_mac_addr = {
 		NULL,
 	},
 };
+
+/* MACsec configuration */
+
+/* Common result structure for MACsec offload enable */
+struct cmd_macsec_offload_on_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t on;
+	cmdline_fixed_string_t encrypt;
+	cmdline_fixed_string_t en_on_off;
+	cmdline_fixed_string_t replay_protect;
+	cmdline_fixed_string_t rp_on_off;
+};
+
+/* Common CLI fields for MACsec offload enable */
+cmdline_parse_token_string_t cmd_macsec_offload_on_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_on_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_on_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 offload, "offload");
+cmdline_parse_token_string_t cmd_macsec_offload_on_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_on_on =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 on, "on");
+cmdline_parse_token_string_t cmd_macsec_offload_on_encrypt =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 encrypt, "encrypt");
+cmdline_parse_token_string_t cmd_macsec_offload_on_en_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 en_on_off, "on#off");
+cmdline_parse_token_string_t cmd_macsec_offload_on_replay_protect =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 replay_protect, "replay-protect");
+cmdline_parse_token_string_t cmd_macsec_offload_on_rp_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 rp_on_off, "on#off");
+
+static void
+cmd_set_macsec_offload_on_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_on_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+	int en = (strcmp(res->en_on_off, "on") == 0) ? 1 : 0;
+	int rp = (strcmp(res->rp_on_off, "on") == 0) ? 1 : 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_on = {
+	.f = cmd_set_macsec_offload_on_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> on "
+		"encrypt on|off replay-protect on|off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_on_set,
+		(void *)&cmd_macsec_offload_on_macsec,
+		(void *)&cmd_macsec_offload_on_offload,
+		(void *)&cmd_macsec_offload_on_port_id,
+		(void *)&cmd_macsec_offload_on_on,
+		(void *)&cmd_macsec_offload_on_encrypt,
+		(void *)&cmd_macsec_offload_on_en_on_off,
+		(void *)&cmd_macsec_offload_on_replay_protect,
+		(void *)&cmd_macsec_offload_on_rp_on_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec offload disable */
+struct cmd_macsec_offload_off_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t off;
+};
+
+/* Common CLI fields for MACsec offload disable */
+cmdline_parse_token_string_t cmd_macsec_offload_off_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_off_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_off_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 offload, "offload");
+cmdline_parse_token_string_t cmd_macsec_offload_off_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_off_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 off, "off");
+
+static void
+cmd_set_macsec_offload_off_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_off_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_disable(port_id);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_off = {
+	.f = cmd_set_macsec_offload_off_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_off_set,
+		(void *)&cmd_macsec_offload_off_macsec,
+		(void *)&cmd_macsec_offload_off_offload,
+		(void *)&cmd_macsec_offload_off_port_id,
+		(void *)&cmd_macsec_offload_off_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure connection configure */
+struct cmd_macsec_sc_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sc;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	struct ether_addr mac;
+	uint16_t pi;
+};
+
+/* Common CLI fields for MACsec secure connection configure */
+cmdline_parse_token_string_t cmd_macsec_sc_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sc_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sc_sc =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 sc, "sc");
+cmdline_parse_token_string_t cmd_macsec_sc_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sc_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 port_id, UINT8);
+cmdline_parse_token_etheraddr_t cmd_macsec_sc_mac =
+	TOKEN_ETHERADDR_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 mac);
+cmdline_parse_token_num_t cmd_macsec_sc_pi =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 pi, UINT16);
+
+static void
+cmd_set_macsec_sc_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sc_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_config_txsc(res->port_id,
+				res->mac.addr_bytes) :
+		rte_pmd_ixgbe_macsec_config_rxsc(res->port_id,
+				res->mac.addr_bytes, res->pi);
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sc = {
+	.f = cmd_set_macsec_sc_parsed,
+	.data = NULL,
+	.help_str = "set macsec sc tx|rx <port_id> <mac> <pi>",
+	.tokens = {
+		(void *)&cmd_macsec_sc_set,
+		(void *)&cmd_macsec_sc_macsec,
+		(void *)&cmd_macsec_sc_sc,
+		(void *)&cmd_macsec_sc_tx_rx,
+		(void *)&cmd_macsec_sc_port_id,
+		(void *)&cmd_macsec_sc_mac,
+		(void *)&cmd_macsec_sc_pi,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure association configure */
+struct cmd_macsec_sa_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sa;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	uint8_t idx;
+	uint8_t an;
+	uint32_t pn;
+	cmdline_fixed_string_t key;
+};
+
+/* Common CLI fields for MACsec secure association configure */
+cmdline_parse_token_string_t cmd_macsec_sa_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sa_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sa_sa =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 sa, "sa");
+cmdline_parse_token_string_t cmd_macsec_sa_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sa_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 port_id, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_idx =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 idx, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_an =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 an, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_pn =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 pn, UINT32);
+cmdline_parse_token_string_t cmd_macsec_sa_key =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 key, NULL);
+
+static void
+cmd_set_macsec_sa_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sa_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+	uint8_t key[16] = { 0 };
+	uint8_t xdgt0;
+	uint8_t xdgt1;
+	int key_len;
+	int i;
+
+	key_len = strlen(res->key) / 2;
+	if (key_len > 16)
+		key_len = 16;
+
+	for (i = 0; i < key_len; i++) {
+		xdgt0 = parse_and_check_key_hexa_digit(res->key, (i * 2));
+		if (xdgt0 == 0xFF)
+			return;
+		xdgt1 = parse_and_check_key_hexa_digit(res->key, (i * 2) + 1);
+		if (xdgt1 == 0xFF)
+			return;
+		key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1);
+	}
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_select_txsa(res->port_id,
+			res->idx, res->an, res->pn, key) :
+		rte_pmd_ixgbe_macsec_select_rxsa(res->port_id,
+			res->idx, res->an, res->pn, key);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		printf("invalid idx %d or an %d\n", res->idx, res->an);
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sa = {
+	.f = cmd_set_macsec_sa_parsed,
+	.data = NULL,
+	.help_str = "set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>",
+	.tokens = {
+		(void *)&cmd_macsec_sa_set,
+		(void *)&cmd_macsec_sa_macsec,
+		(void *)&cmd_macsec_sa_sa,
+		(void *)&cmd_macsec_sa_tx_rx,
+		(void *)&cmd_macsec_sa_port_id,
+		(void *)&cmd_macsec_sa_idx,
+		(void *)&cmd_macsec_sa_an,
+		(void *)&cmd_macsec_sa_pn,
+		(void *)&cmd_macsec_sa_key,
+		NULL,
+	},
+};
 #endif
 
 /* ******************************************************************************** */
@@ -11656,6 +12041,10 @@ cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_split_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_on,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sc,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sa,
 #endif
 	NULL,
 };
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index d361db1..38e955f 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -66,6 +66,9 @@
 #include <rte_ip.h>
 #include <rte_string_fns.h>
 #include <rte_flow.h>
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#include <rte_pmd_ixgbe.h>
+#endif
 
 #include "testpmd.h"
 
@@ -113,6 +116,10 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+#ifdef RTE_LIBRTE_IXGBE_PMD
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_IXGBE_MACSEC;
+#endif
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index f996039..7f5dcd7 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -66,6 +66,9 @@
 #include <rte_ip.h>
 #include <rte_string_fns.h>
 #include <rte_flow.h>
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#include <rte_pmd_ixgbe.h>
+#endif
 
 #include "testpmd.h"
 
@@ -113,6 +116,10 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+#ifdef RTE_LIBRTE_IXGBE_PMD
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_IXGBE_MACSEC;
+#endif
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 22ce2d6..0a9a1af 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -143,6 +143,8 @@ struct fwd_stream {
 #define TESTPMD_TX_OFFLOAD_INSERT_VLAN       0x0040
 /** Insert double VLAN header in forward engine */
 #define TESTPMD_TX_OFFLOAD_INSERT_QINQ       0x0080
+/** Offload MACsec in forward engine */
+#define TESTPMD_TX_OFFLOAD_MACSEC            0x0100
 
 /** Descriptor for a single flow. */
 struct port_flow {
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index e996f35..3aea8b3 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -69,6 +69,9 @@
 #include <rte_udp.h>
 #include <rte_string_fns.h>
 #include <rte_flow.h>
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#include <rte_pmd_ixgbe.h>
+#endif
 
 #include "testpmd.h"
 
@@ -215,6 +218,10 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+#ifdef RTE_LIBRTE_IXGBE_PMD
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_IXGBE_MACSEC;
+#endif
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_mbuf_raw_alloc(mbp);
 		if (pkt == NULL) {
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index cacdef1..f4068d3 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -507,6 +507,38 @@ Set mac antispoof for a VF from the PF::
 
    testpmd> set vf mac antispoof  (port_id) (vf_id) (on|off)
 
+set macsec offload
+~~~~~~~~~~~~~~~~~~
+
+Enable/disable MACsec offload::
+
+   testpmd> set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)
+   testpmd> set macsec offload (port_id) off
+
+set macsec sc
+~~~~~~~~~~~~~
+
+Configure MACsec secure connection (SC)::
+
+   testpmd> set macsec sc (tx|rx) (port_id) (mac) (pi)
+
+.. note::
+
+   The pi argument is ignored for tx.
+   Check the NIC Datasheet for hardware limits.
+
+set macsec sa
+~~~~~~~~~~~~~
+
+Configure MACsec secure association (SA)::
+
+   testpmd> set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)
+
+.. note::
+
+   The IDX value must be 0 or 1.
+   Check the NIC Datasheet for hardware limits.
+
 vlan set strip
 ~~~~~~~~~~~~~~
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v4 6/7] doc: add ixgbe specific APIs
  2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
                         ` (4 preceding siblings ...)
  2016-12-28 15:41       ` [PATCH v4 5/7] app/testpmd: add MACsec offload commands Tiwei Bie
@ 2016-12-28 15:41       ` Tiwei Bie
  2016-12-28 15:41       ` [PATCH v4 7/7] doc: update the release notes for the reserved flags Tiwei Bie
                         ` (2 subsequent siblings)
  8 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-28 15:41 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Add information about the new ixgbe PMD APIs in the release note.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 doc/guides/rel_notes/release_17_02.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 180af82..34dba45 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -52,6 +52,12 @@ New Features
   See the :ref:`Generic flow API <Generic_flow_API>` documentation for more
   information.
 
+* **Added APIs for MACsec offload support to the ixgbe PMD.**
+
+  Six new APIs and some related flags have been added to the ixgbe PMD for
+  MACsec offload support. The declarations of the APIs and the definitions
+  of the flags can be found in ``rte_pmd_ixgbe.h``.
+
 
 Resolved Issues
 ---------------
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v4 7/7] doc: update the release notes for the reserved flags
  2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
                         ` (5 preceding siblings ...)
  2016-12-28 15:41       ` [PATCH v4 6/7] doc: add ixgbe specific APIs Tiwei Bie
@ 2016-12-28 15:41       ` Tiwei Bie
  2017-01-03  6:15       ` [PATCH v4 0/7] Add MACsec offload support for ixgbe Lu, Wenzhuo
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
  8 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2016-12-28 15:41 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Add information about the reserved flags for PMD-specific API in
the release note.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 doc/guides/rel_notes/release_17_02.rst | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 34dba45..af71f39 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -58,6 +58,18 @@ New Features
   MACsec offload support. The declarations of the APIs and the definitions
   of the flags can be found in ``rte_pmd_ixgbe.h``.
 
+* **Reserved flags for PMD to implement the PMD-specific API.**
+
+  The following flags in mbuf and ethdev are reserved for a PMD to define
+  its own flags when implementing a PMD-specific API.
+
+  * DEV_RX_OFFLOAD_RESERVED_0
+  * DEV_TX_OFFLOAD_RESERVED_0
+  * RTE_ETH_EVENT_RESERVED_0
+  * PKT_TX_RESERVED_0
+
+  You can refer to ``rte_pmd_ixgbe.h`` for an example.
+
 
 Resolved Issues
 ---------------
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* Re: [PATCH v4 0/7] Add MACsec offload support for ixgbe
  2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
                         ` (6 preceding siblings ...)
  2016-12-28 15:41       ` [PATCH v4 7/7] doc: update the release notes for the reserved flags Tiwei Bie
@ 2017-01-03  6:15       ` Lu, Wenzhuo
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
  8 siblings, 0 replies; 79+ messages in thread
From: Lu, Wenzhuo @ 2017-01-03  6:15 UTC (permalink / raw)
  To: Bie, Tiwei, dev
  Cc: adrien.mazarguil, Mcnamara, John, olivier.matz, thomas.monjalon,
	Ananyev, Konstantin, Zhang, Helin, Dai, Wei, Wang, Xiao W

Hi,


> -----Original Message-----
> From: Bie, Tiwei
> Sent: Wednesday, December 28, 2016 11:41 PM
> To: dev@dpdk.org
> Cc: adrien.mazarguil@6wind.com; Lu, Wenzhuo; Mcnamara, John;
> olivier.matz@6wind.com; thomas.monjalon@6wind.com; Ananyev, Konstantin;
> Zhang, Helin; Dai, Wei; Wang, Xiao W
> Subject: [PATCH v4 0/7] Add MACsec offload support for ixgbe
> 
> This patch set adds the MACsec offload support for ixgbe.
> The testpmd is also updated to support MACsec cmds.
> 
> v2:
> - Update the documents for testpmd;
> - Update the release notes;
> - Reuse the functions provided by base code;
> 
> v3:
> - Add the missing parts of MACsec mbuf flag and reorganize the patch set;
> - Add an ethdev event type for MACsec;
> - Advertise the MACsec offload capabilities based on the mac type;
> - Minor fixes and improvements;
> 
> v4:
> - Reserve bits in mbuf and ethdev for PMD specific API;
> - Use the reserved bits in PMD specific API;
> 
> Tiwei Bie (7):
>   mbuf: reserve a Tx offload flag for PMD-specific API
>   ethdev: reserve an event type for PMD-specific API
>   ethdev: reserve capability flags for PMD-specific API
>   net/ixgbe: add MACsec offload support
>   app/testpmd: add MACsec offload commands
>   doc: add ixgbe specific APIs
>   doc: update the release notes for the reserved flags
> 
>  app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++
>  app/test-pmd/macfwd.c                       |   7 +
>  app/test-pmd/macswap.c                      |   7 +
>  app/test-pmd/testpmd.h                      |   2 +
>  app/test-pmd/txonly.c                       |   7 +
>  doc/guides/rel_notes/release_17_02.rst      |  18 ++
>  doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 ++
>  drivers/net/ixgbe/ixgbe_ethdev.c            | 481 +++++++++++++++++++++++++++-
>  drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
>  drivers/net/ixgbe/ixgbe_rxtx.c              |   5 +
>  drivers/net/ixgbe/rte_pmd_ixgbe.h           | 122 +++++++
>  drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
>  lib/librte_ether/rte_ethdev.h               |   4 +
>  lib/librte_mbuf/rte_mbuf.c                  |   2 +
>  lib/librte_mbuf/rte_mbuf.h                  |   5 +
>  15 files changed, 1132 insertions(+), 5 deletions(-)
> 
> --
> 2.7.4
Series Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>

^ permalink raw reply	[flat|nested] 79+ messages in thread

* [PATCH v5 0/8] Add MACsec offload support for ixgbe
  2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
                         ` (7 preceding siblings ...)
  2017-01-03  6:15       ` [PATCH v4 0/7] Add MACsec offload support for ixgbe Lu, Wenzhuo
@ 2017-01-04  7:21       ` Tiwei Bie
  2017-01-04  7:21         ` [PATCH v5 1/8] mbuf: reserve a Tx offload flag for PMD-specific API Tiwei Bie
                           ` (9 more replies)
  8 siblings, 10 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04  7:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

This patch set adds the MACsec offload support for ixgbe.
The testpmd is also updated to support MACsec cmds.

v2:
- Update the documents for testpmd;
- Update the release notes;
- Reuse the functions provided by base code;

v3:
- Add the missing parts of MACsec mbuf flag and reorganize the patch set;
- Add an ethdev event type for MACsec;
- Advertise the MACsec offload capabilities based on the mac type;
- Minor fixes and improvements;

v4:
- Reserve bits in mbuf and ethdev for PMD specific API;
- Use the reserved bits in PMD specific API;

v5:
- Add MACsec offload in the NIC feature list;
- Minor improvements on comments;

Tiwei Bie (8):
  mbuf: reserve a Tx offload flag for PMD-specific API
  ethdev: reserve an event type for PMD-specific API
  ethdev: reserve capability flags for PMD-specific API
  net/ixgbe: add MACsec offload support
  app/testpmd: add MACsec offload commands
  doc: add ixgbe specific APIs
  doc: update the release notes for the reserved flags
  doc: add MACsec offload into NIC feature list

 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   7 +
 app/test-pmd/macswap.c                      |   7 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   7 +
 doc/guides/nics/features/default.ini        |   1 +
 doc/guides/nics/features/ixgbe.ini          |   1 +
 doc/guides/rel_notes/release_17_02.rst      |  18 ++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 ++
 drivers/net/ixgbe/ixgbe_ethdev.c            | 481 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   5 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 122 +++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 lib/librte_ether/rte_ethdev.h               |   4 +
 lib/librte_mbuf/rte_mbuf.c                  |   2 +
 lib/librte_mbuf/rte_mbuf.h                  |   5 +
 17 files changed, 1134 insertions(+), 5 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 79+ messages in thread

* [PATCH v5 1/8] mbuf: reserve a Tx offload flag for PMD-specific API
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
@ 2017-01-04  7:21         ` Tiwei Bie
  2017-01-04  7:21         ` [PATCH v5 2/8] ethdev: reserve an event type " Tiwei Bie
                           ` (8 subsequent siblings)
  9 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04  7:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Reserve a Tx offload flag in mbuf that can be used by a PMD to define
its own Tx offload flag when implementing a PMD-specific API.
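
For illustration only (not part of this patch): a minimal sketch of how
a PMD-specific header is expected to alias the reserved bit and how an
application would then mark a packet. PKT_TX_IXGBE_MACSEC is the alias
added by the ixgbe patches later in this series.

    /* Illustrative sketch; PKT_TX_IXGBE_MACSEC is defined later in this
     * series as an alias of PKT_TX_RESERVED_0. */
    #include <rte_mbuf.h>
    #include <rte_pmd_ixgbe.h>

    static inline void
    request_macsec_offload(struct rte_mbuf *mb)
    {
        /* Mark the packet before handing it to rte_eth_tx_burst(). */
        mb->ol_flags |= PKT_TX_IXGBE_MACSEC;
    }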

Suggested-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 lib/librte_mbuf/rte_mbuf.c | 2 ++
 lib/librte_mbuf/rte_mbuf.h | 5 +++++
 2 files changed, 7 insertions(+)

diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index 63f43c8..15c4f68 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -404,6 +404,7 @@ const char *rte_get_tx_ol_flag_name(uint64_t mask)
 	case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
 	case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
 	case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
+	case PKT_TX_RESERVED_0: return "PKT_TX_RESERVED_0";
 	default: return NULL;
 	}
 }
@@ -434,6 +435,7 @@ rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
 		  "PKT_TX_TUNNEL_NONE" },
 		{ PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK,
 		  "PKT_TX_TUNNEL_NONE" },
+		{ PKT_TX_RESERVED_0, PKT_TX_RESERVED_0, NULL },
 	};
 	const char *name;
 	unsigned int i;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index ead7c6e..6168a6d 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -182,6 +182,11 @@ extern "C" {
 /* add new TX flags here */
 
 /**
+ * Reserved Tx offload flag for PMD-specific API.
+ */
+#define PKT_TX_RESERVED_0     (0x1ULL << 44)
+
+/**
  * Bits 45:48 used for the tunnel type.
  * When doing Tx offload like TSO or checksum, the HW needs to configure the
  * tunnel type into the HW descriptors.
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v5 2/8] ethdev: reserve an event type for PMD-specific API
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
  2017-01-04  7:21         ` [PATCH v5 1/8] mbuf: reserve a Tx offload flag for PMD-specific API Tiwei Bie
@ 2017-01-04  7:21         ` Tiwei Bie
  2017-01-04  7:21         ` [PATCH v5 3/8] ethdev: reserve capability flags " Tiwei Bie
                           ` (7 subsequent siblings)
  9 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04  7:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Reserve an event type that can be used by a PMD to define its own
event type when implementing a PMD-specific API.
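
For illustration only (not part of this patch): a minimal sketch of how
an application could subscribe to a PMD-specific event built on the
reserved type, assuming the RTE_ETH_EVENT_IXGBE_MACSEC alias added
later in this series and the callback prototype of this release.

    /* Illustrative sketch; RTE_ETH_EVENT_IXGBE_MACSEC is defined later
     * in this series as an alias of RTE_ETH_EVENT_RESERVED_0. */
    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_pmd_ixgbe.h>

    static void
    macsec_event_cb(uint8_t port_id, enum rte_eth_event_type type,
                    void *cb_arg)
    {
        (void)cb_arg;
        if (type == RTE_ETH_EVENT_IXGBE_MACSEC)
            printf("port %d: MACsec PN threshold reached\n", port_id);
    }

    static void
    register_macsec_event(uint8_t port_id)
    {
        /* Return value ignored in this sketch. */
        rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_IXGBE_MACSEC,
                                      macsec_event_cb, NULL);
    }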

Suggested-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index fb51754..d465825 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -3044,6 +3044,8 @@ enum rte_eth_event_type {
 	RTE_ETH_EVENT_INTR_RESET,
 			/**< reset interrupt event, sent to VF on PF reset */
 	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
+	RTE_ETH_EVENT_RESERVED_0,
+			/**< reserved event type for PMD-specific API */
 	RTE_ETH_EVENT_MAX       /**< max value of this enum */
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
  2017-01-04  7:21         ` [PATCH v5 1/8] mbuf: reserve a Tx offload flag for PMD-specific API Tiwei Bie
  2017-01-04  7:21         ` [PATCH v5 2/8] ethdev: reserve an event type " Tiwei Bie
@ 2017-01-04  7:21         ` Tiwei Bie
  2017-01-04 14:21           ` Ananyev, Konstantin
  2017-01-04  7:21         ` [PATCH v5 4/8] net/ixgbe: add MACsec offload support Tiwei Bie
                           ` (6 subsequent siblings)
  9 siblings, 1 reply; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04  7:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Reserve a Tx capability flag and an Rx capability flag that can be
used by a PMD to define its own capability flags when implementing a
PMD-specific API.
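
For illustration only (not part of this patch): a minimal sketch of how
an application could probe the PMD-specific capability bits before
enabling the offload, assuming the ixgbe aliases added later in this
series.

    /* Illustrative sketch; the two aliases below are defined later in
     * this series on top of the reserved capability bits. */
    #include <rte_ethdev.h>
    #include <rte_pmd_ixgbe.h>

    static int
    port_supports_macsec(uint8_t port_id)
    {
        struct rte_eth_dev_info dev_info;

        rte_eth_dev_info_get(port_id, &dev_info);
        return (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IXGBE_MACSEC_STRIP) &&
               (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT);
    }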

Suggested-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index d465825..8800b39 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -857,6 +857,7 @@ struct rte_eth_conf {
 #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
 #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_RESERVED_0  0x00000080 /**< Used for PMD-specific API. */
 
 /**
  * TX offload capabilities of a device.
@@ -874,6 +875,7 @@ struct rte_eth_conf {
 #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_RESERVED_0  0x00002000 /**< Used for PMD-specific API. */
 
 /**
  * Ethernet device information
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v5 4/8] net/ixgbe: add MACsec offload support
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
                           ` (2 preceding siblings ...)
  2017-01-04  7:21         ` [PATCH v5 3/8] ethdev: reserve capability flags " Tiwei Bie
@ 2017-01-04  7:21         ` Tiwei Bie
  2017-01-04  7:21         ` [PATCH v5 5/8] app/testpmd: add MACsec offload commands Tiwei Bie
                           ` (5 subsequent siblings)
  9 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04  7:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

MACsec (or LinkSec) is a MAC-level encryption/authentication scheme
defined in IEEE 802.1AE that uses symmetric cryptography.
This commit adds MACsec offload support for ixgbe.
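
For illustration only (not part of this patch): a rough sketch of the
expected call sequence using the new APIs. The MAC addresses, PI, PN
and key below are placeholders, and error handling for the SC/SA calls
is omitted.

    #include <rte_pmd_ixgbe.h>

    static int
    setup_macsec(uint8_t port_id)
    {
        uint8_t local_mac[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        uint8_t remote_mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
        uint8_t key[16] = { 0 };    /* a real deployment uses a negotiated key */
        int ret;

        /* encryption on, replay protection on */
        ret = rte_pmd_ixgbe_macsec_enable(port_id, 1, 1);
        if (ret != 0)
            return ret;

        rte_pmd_ixgbe_macsec_config_txsc(port_id, local_mac);
        rte_pmd_ixgbe_macsec_config_rxsc(port_id, remote_mac, 1 /* pi */);

        /* SA index 0, association number 0, starting packet number 1 */
        rte_pmd_ixgbe_macsec_select_txsa(port_id, 0, 0, 1, key);
        rte_pmd_ixgbe_macsec_select_rxsa(port_id, 0, 0, 1, key);

        return 0;
    }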

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c            | 481 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   5 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 122 +++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 5 files changed, 659 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ec2edad..653ddd2 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -231,6 +231,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 			uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
@@ -745,6 +746,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
 			   sizeof(rte_ixgbe_stats_strings[0]))
 
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_untagged)},
+	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_encrypted)},
+	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_protected)},
+	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_octets_encrypted)},
+	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+		out_octets_protected)},
+	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_untagged)},
+	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_badtag)},
+	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_nosci)},
+	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unknownsci)},
+	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+		in_octets_decrypted)},
+	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+		in_octets_validated)},
+	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unchecked)},
+	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_delayed)},
+	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_late)},
+	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_ok)},
+	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_invalid)},
+	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notvalid)},
+	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unusedsa)},
+	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+			   sizeof(rte_ixgbe_macsec_strings[0]))
+
 /* Per-queue statistics */
 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
 	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
@@ -2367,6 +2413,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		/* check if lsc interrupt is enabled */
 		if (dev->data->dev_conf.intr_conf.lsc != 0)
 			ixgbe_dev_lsc_interrupt_setup(dev);
+		ixgbe_dev_macsec_interrupt_setup(dev);
 	} else {
 		rte_intr_callback_unregister(intr_handle,
 					     ixgbe_dev_interrupt_handler, dev);
@@ -2557,6 +2604,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 static void
 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 			   struct ixgbe_hw_stats *hw_stats,
+			   struct ixgbe_macsec_stats *macsec_stats,
 			   uint64_t *total_missed_rx, uint64_t *total_qbrc,
 			   uint64_t *total_qprc, uint64_t *total_qprdc)
 {
@@ -2726,6 +2774,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 	/* Flow Director Stats registers */
 	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
 	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+
+	/* MACsec Stats registers */
+	macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+	macsec_stats->out_pkts_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+	macsec_stats->out_pkts_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+	macsec_stats->out_octets_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+	macsec_stats->out_octets_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+	macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+	macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+	macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+	macsec_stats->in_pkts_unknownsci +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+	macsec_stats->in_octets_decrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+	macsec_stats->in_octets_validated +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+	macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+	macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+	macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+	for (i = 0; i < 2; i++) {
+		macsec_stats->in_pkts_ok +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+		macsec_stats->in_pkts_invalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+		macsec_stats->in_pkts_notvalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+	}
+	macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+	macsec_stats->in_pkts_notusingsa +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
 }
 
 /*
@@ -2738,6 +2820,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i;
 
@@ -2746,8 +2831,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-			&total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	if (stats == NULL)
 		return;
@@ -2799,7 +2884,7 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 /* This function calculates the number of xstats based on the current config */
 static unsigned
 ixgbe_xstats_calc_num(void) {
-	return IXGBE_NB_HW_STATS +
+	return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
 		(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
 		(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
 }
@@ -2826,6 +2911,15 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 			count++;
 		}
 
+		/* MACsec Stats */
+		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"%s",
+				rte_ixgbe_macsec_strings[i].name);
+			count++;
+		}
+
 		/* RX Priority Stats */
 		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2875,6 +2969,9 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i, stat, count = 0;
 
@@ -2888,8 +2985,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-				   &total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	/* If this is a reset xstats is NULL, and we have cleared the
 	 * registers by reading them.
@@ -2905,6 +3002,13 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		count++;
 	}
 
+	/* MACsec Stats */
+	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+		xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
+				rte_ixgbe_macsec_strings[i].offset);
+		count++;
+	}
+
 	/* RX Priority Stats */
 	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2932,6 +3036,9 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw_stats *stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 
 	unsigned count = ixgbe_xstats_calc_num();
 
@@ -2940,6 +3047,7 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 
 	/* Reset software totals */
 	memset(stats, 0, sizeof(*stats));
+	memset(macsec_stats, 0, sizeof(*macsec_stats));
 }
 
 static void
@@ -3072,6 +3180,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	    !RTE_ETH_DEV_SRIOV(dev).active)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
 
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IXGBE_MACSEC_STRIP;
+
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3085,6 +3197,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_TX_OFFLOAD_SCTP_CKSUM  |
 		DEV_TX_OFFLOAD_TCP_TSO;
 
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT;
+
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3382,6 +3498,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It is called only once during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	intr->mask |= IXGBE_EICR_LINKSEC;
+
+	return 0;
+}
+
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
  *
@@ -3416,6 +3554,9 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 	if (eicr & IXGBE_EICR_MAILBOX)
 		intr->flags |= IXGBE_FLAG_MAILBOX;
 
+	if (eicr & IXGBE_EICR_LINKSEC)
+		intr->flags |= IXGBE_FLAG_MACSEC;
+
 	if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
 	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
 	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
@@ -3570,6 +3711,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	}
 
+	if (intr->flags & IXGBE_FLAG_MACSEC) {
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_IXGBE_MACSEC,
+					      NULL);
+		intr->flags &= ~IXGBE_FLAG_MACSEC;
+	}
+
 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
 	ixgbe_enable_intr(dev);
 	rte_intr_enable(intr_handle);
@@ -7610,6 +7757,330 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	ixgbevf_dev_interrupt_action(dev);
 }
 
+/**
+ *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the transmit data path and waits for the HW to internally empty
+ *  the Tx security block
+ **/
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECTX_POLL 40
+
+	int i;
+	int sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
+		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
+			break;
+		/* Use interrupt-safe sleep just in case */
+		usec_delay(1000);
+	}
+
+	/* For informational purposes only */
+	if (i >= IXGBE_MAX_SECTX_POLL)
+		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+			 "path fully disabled.  Continuing with init.\n");
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the transmit data path.
+ **/
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+	uint32_t sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * No ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for Tx in the base code, and the base
+	 * code must not be modified in DPDK, so call the
+	 * hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Enable Ethernet CRC (required by MACsec offload) */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+	/* Enable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+	ctrl |= 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+	/* Enable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+		     IXGBE_LSECTXCTRL_AUTH;
+	ctrl |= IXGBE_LSECTXCTRL_AISCI;
+	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+	if (rp)
+		ctrl |= IXGBE_LSECRXCTRL_RP;
+	else
+		ctrl &= ~IXGBE_LSECRXCTRL_RP;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * No ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for Tx in the base code, and the base
+	 * code must not be modified in DPDK, so call the
+	 * hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * No ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for Tx in the base code, and the base
+	 * code must not be modified in DPDK, so call the
+	 * hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Disable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Disable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * No ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for Tx in the base code, and the base
+	 * code must not be modified in DPDK, so call the
+	 * hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+	ctrl = mac[4] | (mac[5] << 8);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+	pi = rte_cpu_to_be_16(pi);
+	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Set the PN and key */
+	pn = rte_cpu_to_be_32(pn);
+	if (idx == 0) {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+		}
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+		}
+	}
+
+	/* Set AN and select the SA */
+	ctrl = (an << idx * 2) | (idx << 4);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN */
+	pn = rte_cpu_to_be_32(pn);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+	/* Set the key */
+	for (i = 0; i < 4; i++) {
+		ctrl = (key[i * 4 + 0] <<  0) |
+		       (key[i * 4 + 1] <<  8) |
+		       (key[i * 4 + 2] << 16) |
+		       (key[i * 4 + 3] << 24);
+		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+	}
+
+	/* Set the AN and validate the SA */
+	ctrl = an | (1 << 2);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+	return 0;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 80350c2..ffced1c 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -43,6 +43,7 @@
 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
 #define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
 #define IXGBE_FLAG_PHY_INTERRUPT    (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
 
 /*
  * Defines that were not part of ixgbe_type.h as they are not used by the
@@ -130,6 +131,10 @@
 #define IXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define IXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
 
+#define IXGBE_SECTX_MINSECIFG_MASK      0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH            0xFFFFFE00
+
 /*
  * Information about the fdir mode.
  */
@@ -265,11 +270,44 @@ struct ixgbe_filter_info {
 };
 
 /*
+ * Statistics counters collected by the MACsec engine
+ */
+struct ixgbe_macsec_stats {
+	/* TX port statistics */
+	uint64_t out_pkts_untagged;
+	uint64_t out_pkts_encrypted;
+	uint64_t out_pkts_protected;
+	uint64_t out_octets_encrypted;
+	uint64_t out_octets_protected;
+
+	/* RX port statistics */
+	uint64_t in_pkts_untagged;
+	uint64_t in_pkts_badtag;
+	uint64_t in_pkts_nosci;
+	uint64_t in_pkts_unknownsci;
+	uint64_t in_octets_decrypted;
+	uint64_t in_octets_validated;
+
+	/* RX SC statistics */
+	uint64_t in_pkts_unchecked;
+	uint64_t in_pkts_delayed;
+	uint64_t in_pkts_late;
+
+	/* RX SA statistics */
+	uint64_t in_pkts_ok;
+	uint64_t in_pkts_invalid;
+	uint64_t in_pkts_notvalid;
+	uint64_t in_pkts_unusedsa;
+	uint64_t in_pkts_notusingsa;
+};
+
+/*
  * Structure to store private data for each driver instance (for each port).
  */
 struct ixgbe_adapter {
 	struct ixgbe_hw             hw;
 	struct ixgbe_hw_stats       stats;
+	struct ixgbe_macsec_stats   macsec_stats;
 	struct ixgbe_hw_fdir_info   fdir;
 	struct ixgbe_interrupt      intr;
 	struct ixgbe_stat_mapping_registers stat_mappings;
@@ -300,6 +338,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->stats)
 
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
 #define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->intr)
 
@@ -445,4 +486,8 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 
 int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op, void *arg);
+
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
 #endif /* _IXGBE_ETHDEV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index b2d9f45..db9fc18 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -79,12 +79,15 @@
 #include "base/ixgbe_common.h"
 #include "ixgbe_rxtx.h"
 
+#include "rte_pmd_ixgbe.h"
+
 /* Bit Mask to indicate what bits required for building TX context */
 #define IXGBE_TX_OFFLOAD_MASK (			 \
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
 		PKT_TX_TCP_SEG |		 \
+		PKT_TX_IXGBE_MACSEC |		 \
 		PKT_TX_OUTER_IP_CKSUM)
 
 #if 1
@@ -519,6 +522,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 		cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
 	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
 		cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+	if (ol_flags & PKT_TX_IXGBE_MACSEC)
+		cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
 	return cmdtype;
 }
 
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index c2fb826..0c28ea2 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -42,6 +42,28 @@
 #include <rte_ethdev.h>
 
 /**
+ * If these flags are advertised by the PMD, the NIC supports MACsec
+ * offload. Incoming MACsec traffic can be offloaded transparently
+ * once the MACsec offload has been configured correctly by the
+ * application. The application can set the PKT_TX_IXGBE_MACSEC flag
+ * in mbufs to enable the MACsec offload for the packets to be transmitted.
+ */
+#define DEV_RX_OFFLOAD_IXGBE_MACSEC_STRIP	DEV_RX_OFFLOAD_RESERVED_0
+#define DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT	DEV_TX_OFFLOAD_RESERVED_0
+
+/**
+ * This event will occur when the PN counter in a MACsec connection
+ * reaches the exhaustion threshold.
+ */
+#define RTE_ETH_EVENT_IXGBE_MACSEC		RTE_ETH_EVENT_RESERVED_0
+
+/**
+ * Request MACsec offload. This flag must be set by the application in
+ * the mbuf to enable this offload feature for a packet to be transmitted.
+ */
+#define PKT_TX_IXGBE_MACSEC			PKT_TX_RESERVED_0
+
+/**
  * Set the VF MAC address.
  *
  * @param port
@@ -183,6 +205,106 @@ int
 rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
 
 /**
+ * Enable MACsec offload.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param en
+ *    1 - Enable encryption (encrypt and add integrity signature).
+ *    0 - Disable encryption (only add integrity signature).
+ * @param rp
+ *    1 - Enable replay protection.
+ *    0 - Disable replay protection.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offload.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+
+/**
+ * Configure Tx SC (Secure Connection).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+
+/**
+ * Configure Rx SC (Secure Connection).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the remote side.
+ * @param pi
+ *   The PI (port identifier) on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable Tx SA (Secure Association).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1).
+ * @param an
+ *   The association number on the local side.
+ * @param pn
+ *   The packet number on the local side.
+ * @param key
+ *   The key on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
+ * Enable Rx SA (Secure Association).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1).
+ * @param an
+ *   The association number on the remote side.
+ * @param pn
+ *   The packet number on the remote side.
+ * @param key
+ *   The key on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
  * Response sent back to ixgbe driver from user app after callback
  */
 enum rte_pmd_ixgbe_mb_event_rsp {
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 92434f3..6d68934 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -15,3 +15,14 @@ DPDK_16.11 {
 	rte_pmd_ixgbe_set_vf_vlan_insert;
 	rte_pmd_ixgbe_set_vf_vlan_stripq;
 } DPDK_2.0;
+
+DPDK_17.02 {
+	global:
+
+	rte_pmd_ixgbe_macsec_enable;
+	rte_pmd_ixgbe_macsec_disable;
+	rte_pmd_ixgbe_macsec_config_txsc;
+	rte_pmd_ixgbe_macsec_config_rxsc;
+	rte_pmd_ixgbe_macsec_select_txsa;
+	rte_pmd_ixgbe_macsec_select_rxsa;
+} DPDK_16.11;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v5 5/8] app/testpmd: add MACsec offload commands
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
                           ` (3 preceding siblings ...)
  2017-01-04  7:21         ` [PATCH v5 4/8] net/ixgbe: add MACsec offload support Tiwei Bie
@ 2017-01-04  7:21         ` Tiwei Bie
  2017-01-04  7:21         ` [PATCH v5 6/8] doc: add ixgbe specific APIs Tiwei Bie
                           ` (4 subsequent siblings)
  9 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04  7:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

The following MACsec offload commands are added:

- set macsec offload <port_id> on encrypt on|off replay-protect on|off
- set macsec offload <port_id> off
- set macsec sc tx|rx <port_id> <mac> <pi>
- set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>

Also update the testpmd user guide.
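
For illustration only (not part of this patch), a possible interactive
sequence on port 0; the MAC addresses, PI, PN and key are placeholders:

    testpmd> set macsec offload 0 on encrypt on replay-protect on
    testpmd> set macsec sc tx 0 02:00:00:00:00:01 0
    testpmd> set macsec sc rx 0 02:00:00:00:00:02 1
    testpmd> set macsec sa tx 0 0 0 1 00112233445566778899aabbccddeeff
    testpmd> set macsec sa rx 0 0 0 1 00112233445566778899aabbccddeeff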

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   7 +
 app/test-pmd/macswap.c                      |   7 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   7 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 +++
 6 files changed, 444 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index f768b6b..4a67894 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -275,6 +275,18 @@ static void cmd_help_long_parsed(void *parsed_result,
 
 			"set vf mac antispoof (port_id) (vf_id) (on|off).\n"
 			"    Set MAC antispoof for a VF from the PF.\n\n"
+
+			"set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)\n"
+			"    Enable MACsec offload.\n\n"
+
+			"set macsec offload (port_id) off\n"
+			"    Disable MACsec offload.\n\n"
+
+			"set macsec sc (tx|rx) (port_id) (mac) (pi)\n"
+			"    Configure MACsec secure connection (SC).\n\n"
+
+			"set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)\n"
+			"    Configure MACsec secure association (SA).\n\n"
 #endif
 
 			"vlan set strip (on|off) (port_id)\n"
@@ -11488,6 +11500,379 @@ cmdline_parse_inst_t cmd_set_vf_mac_addr = {
 		NULL,
 	},
 };
+
+/* MACsec configuration */
+
+/* Common result structure for MACsec offload enable */
+struct cmd_macsec_offload_on_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t on;
+	cmdline_fixed_string_t encrypt;
+	cmdline_fixed_string_t en_on_off;
+	cmdline_fixed_string_t replay_protect;
+	cmdline_fixed_string_t rp_on_off;
+};
+
+/* Common CLI fields for MACsec offload enable */
+cmdline_parse_token_string_t cmd_macsec_offload_on_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_on_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_on_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 offload, "offload");
+cmdline_parse_token_num_t cmd_macsec_offload_on_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_on_on =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 on, "on");
+cmdline_parse_token_string_t cmd_macsec_offload_on_encrypt =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 encrypt, "encrypt");
+cmdline_parse_token_string_t cmd_macsec_offload_on_en_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 en_on_off, "on#off");
+cmdline_parse_token_string_t cmd_macsec_offload_on_replay_protect =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 replay_protect, "replay-protect");
+cmdline_parse_token_string_t cmd_macsec_offload_on_rp_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 rp_on_off, "on#off");
+
+static void
+cmd_set_macsec_offload_on_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_on_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+	int en = (strcmp(res->en_on_off, "on") == 0) ? 1 : 0;
+	int rp = (strcmp(res->rp_on_off, "on") == 0) ? 1 : 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_on = {
+	.f = cmd_set_macsec_offload_on_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> on "
+		"encrypt on|off replay-protect on|off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_on_set,
+		(void *)&cmd_macsec_offload_on_macsec,
+		(void *)&cmd_macsec_offload_on_offload,
+		(void *)&cmd_macsec_offload_on_port_id,
+		(void *)&cmd_macsec_offload_on_on,
+		(void *)&cmd_macsec_offload_on_encrypt,
+		(void *)&cmd_macsec_offload_on_en_on_off,
+		(void *)&cmd_macsec_offload_on_replay_protect,
+		(void *)&cmd_macsec_offload_on_rp_on_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec offload disable */
+struct cmd_macsec_offload_off_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t off;
+};
+
+/* Common CLI fields for MACsec offload disable */
+cmdline_parse_token_string_t cmd_macsec_offload_off_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_off_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_off_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 offload, "offload");
+cmdline_parse_token_num_t cmd_macsec_offload_off_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_off_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 off, "off");
+
+static void
+cmd_set_macsec_offload_off_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_off_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_disable(port_id);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_off = {
+	.f = cmd_set_macsec_offload_off_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_off_set,
+		(void *)&cmd_macsec_offload_off_macsec,
+		(void *)&cmd_macsec_offload_off_offload,
+		(void *)&cmd_macsec_offload_off_port_id,
+		(void *)&cmd_macsec_offload_off_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure connection configure */
+struct cmd_macsec_sc_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sc;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	struct ether_addr mac;
+	uint16_t pi;
+};
+
+/* Common CLI fields for MACsec secure connection configure */
+cmdline_parse_token_string_t cmd_macsec_sc_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sc_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sc_sc =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 sc, "sc");
+cmdline_parse_token_string_t cmd_macsec_sc_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sc_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 port_id, UINT8);
+cmdline_parse_token_etheraddr_t cmd_macsec_sc_mac =
+	TOKEN_ETHERADDR_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 mac);
+cmdline_parse_token_num_t cmd_macsec_sc_pi =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 pi, UINT16);
+
+static void
+cmd_set_macsec_sc_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sc_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_config_txsc(res->port_id,
+				res->mac.addr_bytes) :
+		rte_pmd_ixgbe_macsec_config_rxsc(res->port_id,
+				res->mac.addr_bytes, res->pi);
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sc = {
+	.f = cmd_set_macsec_sc_parsed,
+	.data = NULL,
+	.help_str = "set macsec sc tx|rx <port_id> <mac> <pi>",
+	.tokens = {
+		(void *)&cmd_macsec_sc_set,
+		(void *)&cmd_macsec_sc_macsec,
+		(void *)&cmd_macsec_sc_sc,
+		(void *)&cmd_macsec_sc_tx_rx,
+		(void *)&cmd_macsec_sc_port_id,
+		(void *)&cmd_macsec_sc_mac,
+		(void *)&cmd_macsec_sc_pi,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure association configure */
+struct cmd_macsec_sa_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sa;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	uint8_t idx;
+	uint8_t an;
+	uint32_t pn;
+	cmdline_fixed_string_t key;
+};
+
+/* Common CLI fields for MACsec secure association configure */
+cmdline_parse_token_string_t cmd_macsec_sa_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sa_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sa_sa =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 sa, "sa");
+cmdline_parse_token_string_t cmd_macsec_sa_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sa_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 port_id, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_idx =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 idx, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_an =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 an, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_pn =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 pn, UINT32);
+cmdline_parse_token_string_t cmd_macsec_sa_key =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 key, NULL);
+
+static void
+cmd_set_macsec_sa_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sa_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+	uint8_t key[16] = { 0 };
+	uint8_t xdgt0;
+	uint8_t xdgt1;
+	int key_len;
+	int i;
+
+	key_len = strlen(res->key) / 2;
+	if (key_len > 16)
+		key_len = 16;
+
+	for (i = 0; i < key_len; i++) {
+		xdgt0 = parse_and_check_key_hexa_digit(res->key, (i * 2));
+		if (xdgt0 == 0xFF)
+			return;
+		xdgt1 = parse_and_check_key_hexa_digit(res->key, (i * 2) + 1);
+		if (xdgt1 == 0xFF)
+			return;
+		key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1);
+	}
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_select_txsa(res->port_id,
+			res->idx, res->an, res->pn, key) :
+		rte_pmd_ixgbe_macsec_select_rxsa(res->port_id,
+			res->idx, res->an, res->pn, key);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		printf("invalid idx %d or an %d\n", res->idx, res->an);
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sa = {
+	.f = cmd_set_macsec_sa_parsed,
+	.data = NULL,
+	.help_str = "set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>",
+	.tokens = {
+		(void *)&cmd_macsec_sa_set,
+		(void *)&cmd_macsec_sa_macsec,
+		(void *)&cmd_macsec_sa_sa,
+		(void *)&cmd_macsec_sa_tx_rx,
+		(void *)&cmd_macsec_sa_port_id,
+		(void *)&cmd_macsec_sa_idx,
+		(void *)&cmd_macsec_sa_an,
+		(void *)&cmd_macsec_sa_pn,
+		(void *)&cmd_macsec_sa_key,
+		NULL,
+	},
+};
 #endif
 
 /* ******************************************************************************** */
@@ -11656,6 +12041,10 @@ cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_split_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_on,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sc,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sa,
 #endif
 	NULL,
 };
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index d361db1..38e955f 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -66,6 +66,9 @@
 #include <rte_ip.h>
 #include <rte_string_fns.h>
 #include <rte_flow.h>
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#include <rte_pmd_ixgbe.h>
+#endif
 
 #include "testpmd.h"
 
@@ -113,6 +116,10 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+#ifdef RTE_LIBRTE_IXGBE_PMD
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_IXGBE_MACSEC;
+#endif
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index f996039..7f5dcd7 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -66,6 +66,9 @@
 #include <rte_ip.h>
 #include <rte_string_fns.h>
 #include <rte_flow.h>
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#include <rte_pmd_ixgbe.h>
+#endif
 
 #include "testpmd.h"
 
@@ -113,6 +116,10 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+#ifdef RTE_LIBRTE_IXGBE_PMD
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_IXGBE_MACSEC;
+#endif
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 22ce2d6..0a9a1af 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -143,6 +143,8 @@ struct fwd_stream {
 #define TESTPMD_TX_OFFLOAD_INSERT_VLAN       0x0040
 /** Insert double VLAN header in forward engine */
 #define TESTPMD_TX_OFFLOAD_INSERT_QINQ       0x0080
+/** Offload MACsec in forward engine */
+#define TESTPMD_TX_OFFLOAD_MACSEC            0x0100
 
 /** Descriptor for a single flow. */
 struct port_flow {
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index e996f35..3aea8b3 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -69,6 +69,9 @@
 #include <rte_udp.h>
 #include <rte_string_fns.h>
 #include <rte_flow.h>
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#include <rte_pmd_ixgbe.h>
+#endif
 
 #include "testpmd.h"
 
@@ -215,6 +218,10 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+#ifdef RTE_LIBRTE_IXGBE_PMD
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_IXGBE_MACSEC;
+#endif
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_mbuf_raw_alloc(mbp);
 		if (pkt == NULL) {
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index cacdef1..f4068d3 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -507,6 +507,38 @@ Set mac antispoof for a VF from the PF::
 
    testpmd> set vf mac antispoof  (port_id) (vf_id) (on|off)
 
+set macsec offload
+~~~~~~~~~~~~~~~~~~
+
+Enable/disable MACsec offload::
+
+   testpmd> set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)
+   testpmd> set macsec offload (port_id) off
+
+set macsec sc
+~~~~~~~~~~~~~
+
+Configure MACsec secure connection (SC)::
+
+   testpmd> set macsec sc (tx|rx) (port_id) (mac) (pi)
+
+.. note::
+
+   The pi argument is ignored for tx.
+   Check the NIC Datasheet for hardware limits.
+
+set macsec sa
+~~~~~~~~~~~~~
+
+Configure MACsec secure association (SA)::
+
+   testpmd> set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)
+
+.. note::
+
+   The IDX value must be 0 or 1.
+   Check the NIC Datasheet for hardware limits.
+
 vlan set strip
 ~~~~~~~~~~~~~~
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v5 6/8] doc: add ixgbe specific APIs
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
                           ` (4 preceding siblings ...)
  2017-01-04  7:21         ` [PATCH v5 5/8] app/testpmd: add MACsec offload commands Tiwei Bie
@ 2017-01-04  7:21         ` Tiwei Bie
  2017-01-04  7:21         ` [PATCH v5 7/8] doc: update the release notes for the reserved flags Tiwei Bie
                           ` (3 subsequent siblings)
  9 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04  7:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Add information about the new ixgbe PMD APIs in the release notes.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 doc/guides/rel_notes/release_17_02.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 180af82..34dba45 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -52,6 +52,12 @@ New Features
   See the :ref:`Generic flow API <Generic_flow_API>` documentation for more
   information.
 
+* **Added APIs for MACsec offload support to the ixgbe PMD.**
+
+  Six new APIs and some related flags have been added to the ixgbe PMD for
+  MACsec offload support. The declarations of the APIs and the definitions
+  of the flags can be found in ``rte_pmd_ixgbe.h``.
+
 
 Resolved Issues
 ---------------
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v5 7/8] doc: update the release notes for the reserved flags
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
                           ` (5 preceding siblings ...)
  2017-01-04  7:21         ` [PATCH v5 6/8] doc: add ixgbe specific APIs Tiwei Bie
@ 2017-01-04  7:21         ` Tiwei Bie
  2017-01-04  7:21         ` [PATCH v5 8/8] doc: add MACsec offload into NIC feature list Tiwei Bie
                           ` (2 subsequent siblings)
  9 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04  7:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Add information about the flags reserved for PMD-specific API in
the release notes.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 doc/guides/rel_notes/release_17_02.rst | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 34dba45..af71f39 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -58,6 +58,18 @@ New Features
   MACsec offload support. The declarations of the APIs and the definitions
   of the flags can be found in ``rte_pmd_ixgbe.h``.
 
+* **Reserved flags for PMD to implement the PMD-specific API.**
+
+  The following flags in mbuf and ethdev are reserved for PMDs to define
+  their own flags when implementing the PMD-specific API.
+
+  * DEV_RX_OFFLOAD_RESERVED_0
+  * DEV_TX_OFFLOAD_RESERVED_0
+  * RTE_ETH_EVENT_RESERVED_0
+  * PKT_TX_RESERVED_0
+
+  You can refer to ``rte_pmd_ixgbe.h`` for an example.
+
 
 Resolved Issues
 ---------------
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v5 8/8] doc: add MACsec offload into NIC feature list
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
                           ` (6 preceding siblings ...)
  2017-01-04  7:21         ` [PATCH v5 7/8] doc: update the release notes for the reserved flags Tiwei Bie
@ 2017-01-04  7:21         ` Tiwei Bie
  2017-01-04  8:29         ` [PATCH v5 0/8] Add MACsec offload support for ixgbe Peng, Yuan
  2017-01-11  4:31         ` [PATCH v6 0/6] " Tiwei Bie
  9 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04  7:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Add MACsec offload to the NIC feature list, and also update the
feature list for the supported drivers.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 doc/guides/nics/features/default.ini | 1 +
 doc/guides/nics/features/ixgbe.ini   | 1 +
 2 files changed, 2 insertions(+)

diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini
index f1bf9bf..c412d6f 100644
--- a/doc/guides/nics/features/default.ini
+++ b/doc/guides/nics/features/default.ini
@@ -43,6 +43,7 @@ VLAN offload         =
 QinQ offload         =
 L3 checksum offload  =
 L4 checksum offload  =
+MACsec offload       =
 Inner L3 checksum    =
 Inner L4 checksum    =
 Packet type parsing  =
diff --git a/doc/guides/nics/features/ixgbe.ini b/doc/guides/nics/features/ixgbe.ini
index 4a5667f..24c02fb 100644
--- a/doc/guides/nics/features/ixgbe.ini
+++ b/doc/guides/nics/features/ixgbe.ini
@@ -36,6 +36,7 @@ VLAN offload         = Y
 QinQ offload         = Y
 L3 checksum offload  = Y
 L4 checksum offload  = Y
+MACsec offload       = Y
 Inner L3 checksum    = Y
 Inner L4 checksum    = Y
 Packet type parsing  = Y
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 0/8] Add MACsec offload support for ixgbe
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
                           ` (7 preceding siblings ...)
  2017-01-04  7:21         ` [PATCH v5 8/8] doc: add MACsec offload into NIC feature list Tiwei Bie
@ 2017-01-04  8:29         ` Peng, Yuan
  2017-01-11  4:31         ` [PATCH v6 0/6] " Tiwei Bie
  9 siblings, 0 replies; 79+ messages in thread
From: Peng, Yuan @ 2017-01-04  8:29 UTC (permalink / raw)
  To: Bie, Tiwei, dev
  Cc: adrien.mazarguil, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Ananyev, Konstantin, Zhang, Helin, Dai, Wei,
	Wang, Xiao W

Tested-by: Peng Yuan <yuan.peng@intel.com>

- Tested Branch: master
- Tested Commit: eac901ce29be559b1bb5c5da33fe2bf5c0b4bfd6
- OS: Fedora24 4.5.5-300.fc24.x86_64
- GCC: gcc version 5.3.1 20151207
- CPU: Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
- NIC: Intel Corporation 82599ES 10-Gigabit SFI/SFP+ Network Connection [8086:10fb] 
- Default x86_64-native-linuxapp-gcc configuration
- Total 5 cases, 5 passed, 0 failed

- Prerequisites:
  1x Niantic NIC (2x 10G)
  2x IXIA ports (10G)

- Added commands:
  testpmd>set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)
  " Enable MACsec offload. "
  testpmd>set macsec offload (port_id) off
  " Disable MACsec offload. "
  testpmd>set macsec sc (tx|rx) (port_id) (mac) (pi)
  " Configure MACsec secure connection (SC). "
  testpmd>set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)
  " Configure MACsec secure association (SA). "

Test Case 1: MACsec packets send and receive
============================================

1. bind two ports to dpdk driver::
 ./tools/dpdk-devbind.py -b igb_uio 07:00.0 07:00.1

2. config the rx port
1). start the testpmd of rx port::
 ./testpmd -c 0xc --socket-mem 1024,1024 --file-prefix=rx -w 0000:07:00.1 \
 -- --port-topology=chained -i --crc-strip

2). set MACsec offload on::
 testpmd>set macsec offload 0 on encrypt on replay-protect on

3). set MACsec parameters as rx_port::
 testpmd>set macsec sc rx 0 00:00:00:00:00:01 0
 testpmd>set macsec sa rx 0 0 0 0 00112200000000000000000000000000

4). set MACsec parameters as tx_port::
 testpmd>set macsec sc tx 0 00:00:00:00:00:02 0
 testpmd>set macsec sa tx 0 0 0 0 00112200000000000000000000000000

5). set rxonly::
 testpmd>set fwd rxonly

6). start::
 testpmd>set promisc all on
 testpmd>start

3. config the tx port
1). start the testpmd of tx port::
 ./testpmd -c 0x30 --socket-mem 1024,1024 --file-prefix=tx -w 0000:07:00.0 \
 -- --port-topology=chained -i --crc-strip --txqflags=0x0

2). set MACsec offload on::
 testpmd>set macsec offload 0 on encrypt on replay-protect on

3). set MACsec parameters as tx_port::
 testpmd>set macsec sc tx 0 00:00:00:00:00:01 0
 testpmd>set macsec sa tx 0 0 0 0 00112200000000000000000000000000

4). set MACsec parameters as rx_port::
 testpmd>set macsec sc rx 0 00:00:00:00:00:02 0
 testpmd>set macsec sa rx 0 0 0 0 00112200000000000000000000000000

5). set txonly::
 testpmd>set fwd txonly

6). start::
 testpmd>start

4. check the result::
 testpmd>stop
 testpmd>show port xstats 0
stop the packet transmission on tx_port first, then stop the packet receiving
on rx_port.
check the rx data and tx data:
tx_good_packets == rx_good_packets
out_pkts_encrypted == in_pkts_ok == tx_good_packets == rx_good_packets
out_octets_encrypted == in_octets_decrypted
out_octets_protected == in_octets_validated

 if you want to check the content of the packet, use the command::
 testpmd>set verbose 1
the received packets are decrypted.
check the ol_flags: PKT_RX_IP_CKSUM_GOOD
check the content of the packet:
type=0x0800, the ptype of L2,L3,L4: L2_ETHER L3_IPV4 L4_UDP

Test Case 2: MACsec packets send and normal receive
===================================================

1. disable MACsec offload on rx port::
 testpmd>set macsec offload 0 off

2. start the packet transfer

3. check the result::
 testpmd>stop
 testpmd>show port xstats 0
stop the testpmd on tx_port first, then stop the testpmd on rx_port.
the received packets are encrypted.
check the content of the packet:
type=0x88e5 sw ptype: L2_ETHER  - l2_len=14 - Receive queue=0x0
you can't find L3 and L4 information in the packet
in_octets_decrypted and in_octets_validated don't increase on the last data
transfer.


Test Case 3: normal packet send and MACsec receive
==================================================

1. enable MACsec offload on rx port::
 testpmd>set macsec offload 0 on encrypt on replay-protect on

2. disable MACsec offload on tx port::
 testpmd>set macsec offload 0 off

3. start the packet transfer

4. check the result::
 testpmd>stop
 testpmd>show port xstats 0
stop the testpmd on tx_port first, then stop the testpmd on rx_port.
the received packets are not encrypted.
check the content of the packet:
type=0x0800, the ptype of L2,L3,L4: L2_ETHER L3_IPV4 L4_UDP
in_octets_decrypted and out_pkts_encrypted don't increase on the last data
transfer.


Test Case 4: MACsec send and receive with wrong parameters
==========================================================

1. don't add "--txqflags=0x0" to the tx_port command line.
   the MACsec offload can't work, and the tx packets are sent as normal packets.

2. set different pn on rx and tx port, then start the data transfer.

1) set the parameters as test case 1, start and stop the data transfer.
   check the result, rx port can receive and decrypt the packets normally.
2) reset the pn of tx port to 0::
    testpmd>set macsec sa tx 0 0 0 0 00112200000000000000000000000000
   rx port can receive the packets until the pn equals the pn of tx port::
    out_pkts_encrypted = in_pkts_late + in_pkts_ok

3. set different keys on rx and tx port, then start the data transfer::
    the RX-packets=0,
    in_octets_decrypted == out_octets_encrypted,
    in_pkts_notvalid == out_pkts_encrypted,
    in_pkts_ok=0,
    rx_good_packets=0

4. set different pi on rx and tx port (reset on rx_port), then start the data
   transfer::
    in_octets_decrypted == out_octets_encrypted,
    in_pkts_ok = 0,
    in_pkts_nosci == out_pkts_encrypted

5. set different an on rx and tx port, then start the data transfer::
    rx_good_packets=0,
    in_octets_decrypted == out_octets_encrypted,
    in_pkts_notusingsa == out_pkts_encrypted,
    in_pkts_ok=0,
    rx_good_packets=0

6. set different index on rx and tx port, then start the data transfer::
    in_octets_decrypted == out_octets_encrypted,
    in_pkts_ok == out_pkts_encrypted


Test Case 5: performance test of MACsec offload packets
==========================================================

1. tx linerate
   port0 connected to IXIA port5, port1 connected to IXIA port6, set port0
   MACsec offload on, set fwd mac.
   on IXIA side, start IXIA port6 transmit, start the IXIA capture.
   view the IXIA port5 captured packet: the protocol is MACsec, the EtherType
   is 0x88E5, and the packet length is 96 bytes, 32 bytes more than the
   normal packet.
   The received rate of valid frames is 10.78Mpps, and the %linerate is 100%.

2. rx linerate
   there are three ports 05:00.0 07:00.0 07:00.1. connect 07:00.0 to 07:00.1
   with cable, connect 05:00.0 to IXIA. bind the three ports to dpdk driver.
   start two testpmd::
    ./testpmd -c 0x3 --socket-mem 1024,1024 --file-prefix=rx -w 0000:07:00.1 \
    -- --port-topology=chained -i --crc-strip --txqflags=0x0

    testpmd>set macsec offload 0 on encrypt on replay-protect on
    testpmd>set macsec sc rx 0 00:00:00:00:00:01 0
    testpmd>set macsec sa rx 0 0 0 0 00112200000000000000000000000000
    testpmd>set macsec sc tx 0 00:00:00:00:00:02 0
    testpmd>set macsec sa tx 0 0 0 0 00112200000000000000000000000000
    testpmd>set fwd rxonly

    ./testpmd -c 0xc --socket-mem 1024,1024 --file-prefix=tx -b 0000:07:00.1 \
    -- --port-topology=chained -i --crc-strip --txqflags=0x0

    testpmd>set macsec offload 1 on encrypt on replay-protect on
    testpmd>set macsec sc rx 1 00:00:00:00:00:02 0
    testpmd>set macsec sa rx 1 0 0 0 00112200000000000000000000000000
    testpmd>set macsec sc tx 1 00:00:00:00:00:01 0
    testpmd>set macsec sa tx 1 0 0 0 00112200000000000000000000000000
    testpmd>set fwd mac

   start on both testpmd instances.
   start data transmission from the IXIA port; the frame size is 64 bytes,
   the Ethertype is 0x0800, and the rate is 14.88Mpps.
   check the linerate on rxonly port::
    testpmd>show port stats 0
   It shows "Rx-pps:     10775697", so the rx %linerate is 100%.
   check the MACsec packets number on tx side::
    testpmd>show port xstats 1
   on rx side::
    testpmd>show port xstats 0
   in_pkts_ok == out_pkts_encrypted




-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Tiwei Bie
Sent: Wednesday, January 4, 2017 3:22 PM
To: dev@dpdk.org
Cc: adrien.mazarguil@6wind.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Mcnamara, John <john.mcnamara@intel.com>; olivier.matz@6wind.com; thomas.monjalon@6wind.com; Ananyev, Konstantin <konstantin.ananyev@intel.com>; Zhang, Helin <helin.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
Subject: [dpdk-dev] [PATCH v5 0/8] Add MACsec offload support for ixgbe

This patch set adds the MACsec offload support for ixgbe.
The testpmd is also updated to support MACsec cmds.

v2:
- Update the documents for testpmd;
- Update the release notes;
- Reuse the functions provided by base code;

v3:
- Add the missing parts of MACsec mbuf flag and reorganize the patch set;
- Add an ethdev event type for MACsec;
- Advertise the MACsec offload capabilities based on the mac type;
- Minor fixes and improvements;

v4:
- Reserve bits in mbuf and ethdev for PMD specific API;
- Use the reserved bits in PMD specific API;

v5:
- Add MACsec offload in the NIC feature list;
- Minor improvements on comments;

Tiwei Bie (8):
  mbuf: reserve a Tx offload flag for PMD-specific API
  ethdev: reserve an event type for PMD-specific API
  ethdev: reserve capability flags for PMD-specific API
  net/ixgbe: add MACsec offload support
  app/testpmd: add MACsec offload commands
  doc: add ixgbe specific APIs
  doc: update the release notes for the reserved flags
  doc: add MACsec offload into NIC feature list

 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   7 +
 app/test-pmd/macswap.c                      |   7 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   7 +
 doc/guides/nics/features/default.ini        |   1 +
 doc/guides/nics/features/ixgbe.ini          |   1 +
 doc/guides/rel_notes/release_17_02.rst      |  18 ++
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 ++
 drivers/net/ixgbe/ixgbe_ethdev.c            | 481 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   5 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 122 +++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 lib/librte_ether/rte_ethdev.h               |   4 +
 lib/librte_mbuf/rte_mbuf.c                  |   2 +
 lib/librte_mbuf/rte_mbuf.h                  |   5 +
 17 files changed, 1134 insertions(+), 5 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-04  7:21         ` [PATCH v5 3/8] ethdev: reserve capability flags " Tiwei Bie
@ 2017-01-04 14:21           ` Ananyev, Konstantin
  2017-01-04 14:39             ` Tiwei Bie
  0 siblings, 1 reply; 79+ messages in thread
From: Ananyev, Konstantin @ 2017-01-04 14:21 UTC (permalink / raw)
  To: Bie, Tiwei, dev
  Cc: adrien.mazarguil, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W

Hi Tiwei,

> -----Original Message-----
> From: Bie, Tiwei
> Sent: Wednesday, January 4, 2017 7:22 AM
> To: dev@dpdk.org
> Cc: adrien.mazarguil@6wind.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Mcnamara, John <john.mcnamara@intel.com>;
> olivier.matz@6wind.com; thomas.monjalon@6wind.com; Ananyev, Konstantin <konstantin.ananyev@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
> 
> Reserve a Tx capability flag and a Rx capability flag, that can be
> used by PMD to define its own capability flags when implementing the
> PMD-specific API.
> 
> Suggested-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
> Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> ---
>  lib/librte_ether/rte_ethdev.h | 2 ++
>  1 file changed, 2 insertions(+)
> 
> diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
> index d465825..8800b39 100644
> --- a/lib/librte_ether/rte_ethdev.h
> +++ b/lib/librte_ether/rte_ethdev.h
> @@ -857,6 +857,7 @@ struct rte_eth_conf {
>  #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
>  #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
>  #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
> +#define DEV_RX_OFFLOAD_RESERVED_0  0x00000080 /**< Used for PMD-specific API. */
> 
>  /**
>   * TX offload capabilities of a device.
> @@ -874,6 +875,7 @@ struct rte_eth_conf {
>  #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
>  #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
>  #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
> +#define DEV_TX_OFFLOAD_RESERVED_0  0x00002000 /**< Used for PMD-specific API. */
> 
>  /**
>   * Ethernet device information
> --
> 2.7.4

I am not sure how that supposed to work and how user should know that DEV_RX_OFFLOAD_RESERVED_0  
is actually a MACSEC for ixgbe?
Another question what to do if you would like to create a bonded device over two devices with different NIC types?
As I understand you can end up in situation when  DEV_RX_OFFLOAD_RESERVED_0  would mean different capabilities.
Why not to have this MACSEC capability and ol_flag value as generic ones, as you have them in previous versions of your patch?
Konstantin

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-04 14:21           ` Ananyev, Konstantin
@ 2017-01-04 14:39             ` Tiwei Bie
  2017-01-04 15:14               ` Ananyev, Konstantin
  0 siblings, 1 reply; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04 14:39 UTC (permalink / raw)
  To: Ananyev, Konstantin
  Cc: dev, adrien.mazarguil, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W

On Wed, Jan 04, 2017 at 10:21:08PM +0800, Ananyev, Konstantin wrote:
> Hi Tiwei,
> 
> > -----Original Message-----
> > From: Bie, Tiwei
> > Sent: Wednesday, January 4, 2017 7:22 AM
> > To: dev@dpdk.org
> > Cc: adrien.mazarguil@6wind.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Mcnamara, John <john.mcnamara@intel.com>;
> > olivier.matz@6wind.com; thomas.monjalon@6wind.com; Ananyev, Konstantin <konstantin.ananyev@intel.com>; Zhang, Helin
> > <helin.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> > Subject: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
> > 
> > Reserve a Tx capability flag and a Rx capability flag, that can be
> > used by PMD to define its own capability flags when implementing the
> > PMD-specific API.
> > 
> > Suggested-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> > Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
> > Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > ---
> >  lib/librte_ether/rte_ethdev.h | 2 ++
> >  1 file changed, 2 insertions(+)
> > 
> > diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
> > index d465825..8800b39 100644
> > --- a/lib/librte_ether/rte_ethdev.h
> > +++ b/lib/librte_ether/rte_ethdev.h
> > @@ -857,6 +857,7 @@ struct rte_eth_conf {
> >  #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
> >  #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
> >  #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
> > +#define DEV_RX_OFFLOAD_RESERVED_0  0x00000080 /**< Used for PMD-specific API. */
> > 
> >  /**
> >   * TX offload capabilities of a device.
> > @@ -874,6 +875,7 @@ struct rte_eth_conf {
> >  #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
> >  #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
> >  #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
> > +#define DEV_TX_OFFLOAD_RESERVED_0  0x00002000 /**< Used for PMD-specific API. */
> > 
> >  /**
> >   * Ethernet device information
> > --
> > 2.7.4
> 
> I am not sure how that supposed to work and how user should know that DEV_RX_OFFLOAD_RESERVED_0  
> is actually a MACSEC for ixgbe?

Users are not supposed to use DEV_RX_OFFLOAD_RESERVED_0, instead, they
should use the capabilities and the likes defined in rte_pmd_ixgbe.h
where the PMD-specifics APIs are declared:

/**
 * If these flags are advertised by the PMD, the NIC supports the MACsec
 * offload. The incoming MACsec traffics can be offloaded transparently
 * after the MACsec offload is configured correctly by the application.
 * And the application can set the PKT_TX_IXGBE_MACSEC flag in mbufs to
 * enable the MACsec offload for the packets to be transmitted.
 */
#define DEV_RX_OFFLOAD_IXGBE_MACSEC_STRIP	DEV_RX_OFFLOAD_RESERVED_0
#define DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT	DEV_TX_OFFLOAD_RESERVED_0

/**
 * This event will occur when the PN counter in a MACsec connection
 * reach the exhaustion threshold.
 */
#define RTE_ETH_EVENT_IXGBE_MACSEC		RTE_ETH_EVENT_RESERVED_0

/**
 * Offload the MACsec. This flag must be set by the application in mbuf
 * to enable this offload feature for a packet to be transmitted.
 */
#define PKT_TX_IXGBE_MACSEC			PKT_TX_RESERVED_0

PMD-specific APIs can only be used on the corresponding driver/device,
so different PMD can share the same reserved bit to represent different
things when implementing their own PMD-specific APIs.
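
To make it more concrete, here is a rough sketch (not part of the patches;
the helper names are made up, error handling and the MACsec offload/SC
configuration calls are omitted) of how an application could consume these
ixgbe-specific definitions:

#include <stdio.h>
#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_pmd_ixgbe.h>

/* Called when the PN counter reaches the exhaustion threshold. */
static void
macsec_event_cb(uint8_t port_id, enum rte_eth_event_type type, void *arg)
{
	(void)arg;
	printf("port %u: MACsec event %d, time to re-key the Tx SA\n",
	       port_id, type);
}

static int
setup_macsec_tx(uint8_t port_id, uint8_t idx, uint8_t an, uint32_t pn,
		uint8_t key[16])
{
	struct rte_eth_dev_info dev_info;

	/* Only meaningful on a port driven by ixgbe. */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT))
		return -ENOTSUP;

	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_IXGBE_MACSEC,
				      macsec_event_cb, NULL);

	/* The SC is assumed to be configured already. */
	return rte_pmd_ixgbe_macsec_select_txsa(port_id, idx, an, pn, key);
}

/* Per packet, before rte_eth_tx_burst(): */
static inline void
request_macsec_insert(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_IXGBE_MACSEC;
}

So the application only ever deals with the rte_pmd_ixgbe_* functions and
the *_IXGBE_* names above, never with the reserved symbols themselves.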

> Another question what to do if you would like to create a bonded device over two devices with different NIC types?
> As I understand you can end up in situation when  DEV_RX_OFFLOAD_RESERVED_0  would mean different capabilities.
> Why not to have this MACSEC capability and ol_flag value as generic ones, as you have them in previous versions of your patch?

Those flags are only used in PMD-specific APIs. I don't think we could
use the PMD-specific APIs provided by a certain PMD on a bonded device.

Thanks & regards,
Tiwei Bie

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-04 14:39             ` Tiwei Bie
@ 2017-01-04 15:14               ` Ananyev, Konstantin
  2017-01-04 17:00                 ` Tiwei Bie
  0 siblings, 1 reply; 79+ messages in thread
From: Ananyev, Konstantin @ 2017-01-04 15:14 UTC (permalink / raw)
  To: Bie, Tiwei
  Cc: dev, adrien.mazarguil, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W



> -----Original Message-----
> From: Bie, Tiwei
> Sent: Wednesday, January 4, 2017 2:39 PM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; adrien.mazarguil@6wind.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Mcnamara, John <john.mcnamara@intel.com>;
> olivier.matz@6wind.com; thomas.monjalon@6wind.com; Zhang, Helin <helin.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Wang,
> Xiao W <xiao.w.wang@intel.com>
> Subject: Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
> 
> On Wed, Jan 04, 2017 at 10:21:08PM +0800, Ananyev, Konstantin wrote:
> > Hi Tiwei,
> >
> > > -----Original Message-----
> > > From: Bie, Tiwei
> > > Sent: Wednesday, January 4, 2017 7:22 AM
> > > To: dev@dpdk.org
> > > Cc: adrien.mazarguil@6wind.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Mcnamara, John <john.mcnamara@intel.com>;
> > > olivier.matz@6wind.com; thomas.monjalon@6wind.com; Ananyev, Konstantin <konstantin.ananyev@intel.com>; Zhang, Helin
> > > <helin.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> > > Subject: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
> > >
> > > Reserve a Tx capability flag and a Rx capability flag, that can be
> > > used by PMD to define its own capability flags when implementing the
> > > PMD-specific API.
> > >
> > > Suggested-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> > > Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
> > > Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > > ---
> > >  lib/librte_ether/rte_ethdev.h | 2 ++
> > >  1 file changed, 2 insertions(+)
> > >
> > > diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
> > > index d465825..8800b39 100644
> > > --- a/lib/librte_ether/rte_ethdev.h
> > > +++ b/lib/librte_ether/rte_ethdev.h
> > > @@ -857,6 +857,7 @@ struct rte_eth_conf {
> > >  #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
> > >  #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
> > >  #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
> > > +#define DEV_RX_OFFLOAD_RESERVED_0  0x00000080 /**< Used for PMD-specific API. */
> > >
> > >  /**
> > >   * TX offload capabilities of a device.
> > > @@ -874,6 +875,7 @@ struct rte_eth_conf {
> > >  #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
> > >  #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
> > >  #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
> > > +#define DEV_TX_OFFLOAD_RESERVED_0  0x00002000 /**< Used for PMD-specific API. */
> > >
> > >  /**
> > >   * Ethernet device information
> > > --
> > > 2.7.4
> >
> > I am not sure how that supposed to work and how user should know that DEV_RX_OFFLOAD_RESERVED_0
> > is actually a MACSEC for ixgbe?
> 
> Users are not supposed to use DEV_RX_OFFLOAD_RESERVED_0, instead, they
> should use the capabilities and the likes defined in rte_pmd_ixgbe.h
> where the PMD-specifics APIs are declared:
> 
> /**
>  * If these flags are advertised by the PMD, the NIC supports the MACsec
>  * offload. The incoming MACsec traffics can be offloaded transparently
>  * after the MACsec offload is configured correctly by the application.
>  * And the application can set the PKT_TX_IXGBE_MACSEC flag in mbufs to
>  * enable the MACsec offload for the packets to be transmitted.
>  */
> #define DEV_RX_OFFLOAD_IXGBE_MACSEC_STRIP	DEV_RX_OFFLOAD_RESERVED_0
> #define DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT	DEV_TX_OFFLOAD_RESERVED_0
> 
> /**
>  * This event will occur when the PN counter in a MACsec connection
>  * reach the exhaustion threshold.
>  */
> #define RTE_ETH_EVENT_IXGBE_MACSEC		RTE_ETH_EVENT_RESERVED_0
> 
> /**
>  * Offload the MACsec. This flag must be set by the application in mbuf
>  * to enable this offload feature for a packet to be transmitted.
>  */
> #define PKT_TX_IXGBE_MACSEC			PKT_TX_RESERVED_0
> 
> PMD-specific APIs can only be used on the corresponding driver/device,
> so different PMD can share the same reserved bit to represent different
> things when implementing their own PMD-specific APIs.

Ok, and why do we need it?
Why we can't just have PKT_TX_MACSEC straightway?
What are you trying to gain here?
Is it just for future opportunity to save an extra bit in mbuf.ol_flags?
I don't think we are short of bits here right now, and we don't consume them extra-fast
to start to worry about it.
Again, if it *really* would be for ixgbe only forever, and you don't want to waste a bit in tx_olflags,
why not to introduce device specific ol_flags in mbuf second cache line?
Probably uint16_t would be enough for that.
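
Just to illustrate the idea (purely hypothetical, nothing like this exists
today and the field name/placement would need discussion), something along
these lines:

/* Hypothetical sketch: a small PMD-owned flag word in the mbuf second
 * cache line, so vendor-specific TX requests don't eat generic ol_flags
 * bits.  Not part of any posted patch.
 */
#include <stdint.h>

struct pmd_tx_flags {                /* would be embedded in struct rte_mbuf */
	uint16_t pmd_ol_flags;       /* opaque to ethdev, defined per PMD */
};

/* e.g. in rte_pmd_ixgbe.h: */
#define IXGBE_MBUF_TX_MACSEC	(UINT16_C(1) << 0)	/* pmd_ol_flags bit */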

> 
> > Another question what to do if you would like to create a bonded device over two devices with different NIC types?
> > As I understand you can end up in situation when  DEV_RX_OFFLOAD_RESERVED_0  would mean different capabilities.
> > Why not to have this MACSEC capability and ol_flag value as generic ones, as you have them in previous versions of your patch?
> 
> Those flags are only used in PMD-specific APIs. I don't think we could
> use the PMD-specific APIs provided by a certain PMD on a bonded device.

I understand that.
My question was: suppose user would like to create a bonded device over 2 NICs.
One of them is ixgbe, while other would be some other type.
In future get_dev_info() for each of them might return DEV_RX_OFFLOAD_RESERVED_0  bit as set.
But it would mean completely different thing.
How bonded device would know that to deal properly?

Another example - user has 2 NICs of different type and would like to send the same packet on both of them simultaneously.
As PKT_TX_RESERVED might mean different things for these devices, and user would like to use let say
PKT_TX_IXGBE_MACSEC on one of them, he would need to do a copy of them, instead just increment a refcnt. 

Similar issues might arise at RX handling: user got a packet with PKT_RX_RESERVED_0 set.
What does it really mean if there are different NIC types in the system?
The only way to answer that question, as I can see,  is to keep track from what NIC that packet was received.
Which I suppose, is not always convenient.

Konstantin

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-04 15:14               ` Ananyev, Konstantin
@ 2017-01-04 17:00                 ` Tiwei Bie
  2017-01-04 17:44                   ` Ananyev, Konstantin
  0 siblings, 1 reply; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04 17:00 UTC (permalink / raw)
  To: Ananyev, Konstantin
  Cc: dev, adrien.mazarguil, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W

On Wed, Jan 04, 2017 at 11:14:25PM +0800, Ananyev, Konstantin wrote:
> 
> 
> > -----Original Message-----
> > From: Bie, Tiwei
> > Sent: Wednesday, January 4, 2017 2:39 PM
> > To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> > Cc: dev@dpdk.org; adrien.mazarguil@6wind.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Mcnamara, John <john.mcnamara@intel.com>;
> > olivier.matz@6wind.com; thomas.monjalon@6wind.com; Zhang, Helin <helin.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Wang,
> > Xiao W <xiao.w.wang@intel.com>
> > Subject: Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
> > 
> > On Wed, Jan 04, 2017 at 10:21:08PM +0800, Ananyev, Konstantin wrote:
> > > Hi Tiwei,
> > >
> > > > -----Original Message-----
> > > > From: Bie, Tiwei
> > > > Sent: Wednesday, January 4, 2017 7:22 AM
> > > > To: dev@dpdk.org
> > > > Cc: adrien.mazarguil@6wind.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Mcnamara, John <john.mcnamara@intel.com>;
> > > > olivier.matz@6wind.com; thomas.monjalon@6wind.com; Ananyev, Konstantin <konstantin.ananyev@intel.com>; Zhang, Helin
> > > > <helin.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> > > > Subject: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
> > > >
> > > > Reserve a Tx capability flag and a Rx capability flag, that can be
> > > > used by PMD to define its own capability flags when implementing the
> > > > PMD-specific API.
> > > >
> > > > Suggested-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> > > > Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
> > > > Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > > > ---
> > > >  lib/librte_ether/rte_ethdev.h | 2 ++
> > > >  1 file changed, 2 insertions(+)
> > > >
> > > > diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
> > > > index d465825..8800b39 100644
> > > > --- a/lib/librte_ether/rte_ethdev.h
> > > > +++ b/lib/librte_ether/rte_ethdev.h
> > > > @@ -857,6 +857,7 @@ struct rte_eth_conf {
> > > >  #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
> > > >  #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
> > > >  #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
> > > > +#define DEV_RX_OFFLOAD_RESERVED_0  0x00000080 /**< Used for PMD-specific API. */
> > > >
> > > >  /**
> > > >   * TX offload capabilities of a device.
> > > > @@ -874,6 +875,7 @@ struct rte_eth_conf {
> > > >  #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
> > > >  #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
> > > >  #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
> > > > +#define DEV_TX_OFFLOAD_RESERVED_0  0x00002000 /**< Used for PMD-specific API. */
> > > >
> > > >  /**
> > > >   * Ethernet device information
> > > > --
> > > > 2.7.4
> > >
> > > I am not sure how that supposed to work and how user should know that DEV_RX_OFFLOAD_RESERVED_0
> > > is actually a MACSEC for ixgbe?
> > 
> > Users are not supposed to use DEV_RX_OFFLOAD_RESERVED_0, instead, they
> > should use the capabilities and the likes defined in rte_pmd_ixgbe.h
> > where the PMD-specifics APIs are declared:
> > 
> > /**
> >  * If these flags are advertised by the PMD, the NIC supports the MACsec
> >  * offload. The incoming MACsec traffics can be offloaded transparently
> >  * after the MACsec offload is configured correctly by the application.
> >  * And the application can set the PKT_TX_IXGBE_MACSEC flag in mbufs to
> >  * enable the MACsec offload for the packets to be transmitted.
> >  */
> > #define DEV_RX_OFFLOAD_IXGBE_MACSEC_STRIP	DEV_RX_OFFLOAD_RESERVED_0
> > #define DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT	DEV_TX_OFFLOAD_RESERVED_0
> > 
> > /**
> >  * This event will occur when the PN counter in a MACsec connection
> >  * reach the exhaustion threshold.
> >  */
> > #define RTE_ETH_EVENT_IXGBE_MACSEC		RTE_ETH_EVENT_RESERVED_0
> > 
> > /**
> >  * Offload the MACsec. This flag must be set by the application in mbuf
> >  * to enable this offload feature for a packet to be transmitted.
> >  */
> > #define PKT_TX_IXGBE_MACSEC			PKT_TX_RESERVED_0
> > 
> > PMD-specific APIs can only be used on the corresponding driver/device,
> > so different PMD can share the same reserved bit to represent different
> > things when implementing their own PMD-specific APIs.
> 
> Ok, and why do we need it?
> Why we can't just have PKT_TX_MACSEC straightway?
> What are you trying to gain here?
> Is it just for future opportunity to save an extra bit in mbuf.ol_flags?
> I don't think we are short of bits here right now, and we don't consume them extra-fast
> to start to worry about it.
> Again, if it *really* would be for ixgbe only forever, and you don't want to waste a bit in tx_olflags,
> why not to introduce device specific ol_flags in mbuf second cache line?
> Probably uint16_t would be enough for that.
> 
> > 
> > > Another question what to do if you would like to create a bonded device over two devices with different NIC types?
> > > As I understand you can end up in situation when  DEV_RX_OFFLOAD_RESERVED_0  would mean different capabilities.
> > > Why not to have this MACSEC capability and ol_flag value as generic ones, as you have them in previous versions of your patch?
> > 
> > Those flags are only used in PMD-specific APIs. I don't think we could
> > use the PMD-specific APIs provided by a certain PMD on a bonded device.
> 
> I understand that.
> My question was: suppose user would like to create a bonded device over 2 NICs.
> One of them is ixgbe, while other would be some other type.
> In future get_dev_info() for each of them might return DEV_RX_OFFLOAD_RESERVED_0  bit as set.
> But it would mean completely different thing.
> How bonded device would know that to deal properly?
> 
> Another example - user has 2 NICs of different type and would like to send the same packet on both of them simultaneously.
> As PKT_TX_RESERVED might mean different things for these devices, and user would like to use let say
> PKT_TX_IXGBE_MACSEC on one of them, he would need to do a copy of them, instead just increment a refcnt. 
> 
> Similar issues might arise at RX handling: user got a packet with PKT_RX_RESERVED_0 set.
> What does it really mean if there are different NIC types in the system?
> The only way to answer that question, as I can see,  is to keep track from what NIC that packet was received.
> Which I suppose, is not always convenient.
> 

The main purpose is to put the PMD-specific APIs in a separate
namespace instead of mixing the PMD-specific APIs and global APIs
up, and also save the bits in mbuf.ol_flags.

There are other ways to achieve this goal, such as introducing
the PMD specific ol_flags in mbuf second cache line as you said.
I just thought defining some reserved bits seems to be the most
simple way which won't introduce many changes.

What's your suggestions? Should I just revert the changes and
define the generic flags directly?

Thanks & regards,
Tiwei Bie

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-04 17:00                 ` Tiwei Bie
@ 2017-01-04 17:44                   ` Ananyev, Konstantin
  2017-01-04 23:56                     ` Tiwei Bie
  0 siblings, 1 reply; 79+ messages in thread
From: Ananyev, Konstantin @ 2017-01-04 17:44 UTC (permalink / raw)
  To: Bie, Tiwei
  Cc: dev, adrien.mazarguil, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W



> -----Original Message-----
> From: Bie, Tiwei
> Sent: Wednesday, January 4, 2017 5:00 PM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; adrien.mazarguil@6wind.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Mcnamara, John <john.mcnamara@intel.com>;
> olivier.matz@6wind.com; thomas.monjalon@6wind.com; Zhang, Helin <helin.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Wang,
> Xiao W <xiao.w.wang@intel.com>
> Subject: Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
> 
> On Wed, Jan 04, 2017 at 11:14:25PM +0800, Ananyev, Konstantin wrote:
> >
> >
> > > -----Original Message-----
> > > From: Bie, Tiwei
> > > Sent: Wednesday, January 4, 2017 2:39 PM
> > > To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> > > Cc: dev@dpdk.org; adrien.mazarguil@6wind.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Mcnamara, John
> <john.mcnamara@intel.com>;
> > > olivier.matz@6wind.com; thomas.monjalon@6wind.com; Zhang, Helin <helin.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>;
> Wang,
> > > Xiao W <xiao.w.wang@intel.com>
> > > Subject: Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
> > >
> > > On Wed, Jan 04, 2017 at 10:21:08PM +0800, Ananyev, Konstantin wrote:
> > > > Hi Tiwei,
> > > >
> > > > > -----Original Message-----
> > > > > From: Bie, Tiwei
> > > > > Sent: Wednesday, January 4, 2017 7:22 AM
> > > > > To: dev@dpdk.org
> > > > > Cc: adrien.mazarguil@6wind.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Mcnamara, John <john.mcnamara@intel.com>;
> > > > > olivier.matz@6wind.com; thomas.monjalon@6wind.com; Ananyev, Konstantin <konstantin.ananyev@intel.com>; Zhang, Helin
> > > > > <helin.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> > > > > Subject: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
> > > > >
> > > > > Reserve a Tx capability flag and a Rx capability flag, that can be
> > > > > used by PMD to define its own capability flags when implementing the
> > > > > PMD-specific API.
> > > > >
> > > > > Suggested-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> > > > > Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
> > > > > Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > > > > ---
> > > > >  lib/librte_ether/rte_ethdev.h | 2 ++
> > > > >  1 file changed, 2 insertions(+)
> > > > >
> > > > > diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
> > > > > index d465825..8800b39 100644
> > > > > --- a/lib/librte_ether/rte_ethdev.h
> > > > > +++ b/lib/librte_ether/rte_ethdev.h
> > > > > @@ -857,6 +857,7 @@ struct rte_eth_conf {
> > > > >  #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
> > > > >  #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
> > > > >  #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
> > > > > +#define DEV_RX_OFFLOAD_RESERVED_0  0x00000080 /**< Used for PMD-specific API. */
> > > > >
> > > > >  /**
> > > > >   * TX offload capabilities of a device.
> > > > > @@ -874,6 +875,7 @@ struct rte_eth_conf {
> > > > >  #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
> > > > >  #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
> > > > >  #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
> > > > > +#define DEV_TX_OFFLOAD_RESERVED_0  0x00002000 /**< Used for PMD-specific API. */
> > > > >
> > > > >  /**
> > > > >   * Ethernet device information
> > > > > --
> > > > > 2.7.4
> > > >
> > > > I am not sure how that supposed to work and how user should know that DEV_RX_OFFLOAD_RESERVED_0
> > > > is actually a MACSEC for ixgbe?
> > >
> > > Users are not supposed to use DEV_RX_OFFLOAD_RESERVED_0, instead, they
> > > should use the capabilities and the likes defined in rte_pmd_ixgbe.h
> > > where the PMD-specifics APIs are declared:
> > >
> > > /**
> > >  * If these flags are advertised by the PMD, the NIC supports the MACsec
> > >  * offload. The incoming MACsec traffics can be offloaded transparently
> > >  * after the MACsec offload is configured correctly by the application.
> > >  * And the application can set the PKT_TX_IXGBE_MACSEC flag in mbufs to
> > >  * enable the MACsec offload for the packets to be transmitted.
> > >  */
> > > #define DEV_RX_OFFLOAD_IXGBE_MACSEC_STRIP	DEV_RX_OFFLOAD_RESERVED_0
> > > #define DEV_TX_OFFLOAD_IXGBE_MACSEC_INSERT	DEV_TX_OFFLOAD_RESERVED_0
> > >
> > > /**
> > >  * This event will occur when the PN counter in a MACsec connection
> > >  * reach the exhaustion threshold.
> > >  */
> > > #define RTE_ETH_EVENT_IXGBE_MACSEC		RTE_ETH_EVENT_RESERVED_0
> > >
> > > /**
> > >  * Offload the MACsec. This flag must be set by the application in mbuf
> > >  * to enable this offload feature for a packet to be transmitted.
> > >  */
> > > #define PKT_TX_IXGBE_MACSEC			PKT_TX_RESERVED_0
> > >
> > > PMD-specific APIs can only be used on the corresponding driver/device,
> > > so different PMD can share the same reserved bit to represent different
> > > things when implementing their own PMD-specific APIs.
> >
> > Ok, and why do we need it?
> > Why we can't just have PKT_TX_MACSEC straightway?
> > What are you trying to gain here?
> > Is it just for future opportunity to save an extra bit in mbuf.ol_flags?
> > I don't think we are short of bits here right now, and we don't consume them extra-fast
> > to start to worry about it.
> > Again, if it *really* would be for ixgbe only forever, and you don't want to waste a bit in tx_olflags,
> > why not to introduce device specific ol_flags in mbuf second cache line?
> > Probably uint16_t would be enough for that.
> >
> > >
> > > > Another question what to do if you would like to create a bonded device over two devices with different NIC types?
> > > > As I understand you can end up in situation when  DEV_RX_OFFLOAD_RESERVED_0  would mean different capabilities.
> > > > Why not to have this MACSEC capability and ol_flag value as generic ones, as you have them in previous versions of your patch?
> > >
> > > Those flags are only used in PMD-specific APIs. I don't think we could
> > > use the PMD-specific APIs provided by a certain PMD on a bonded device.
> >
> > I understand that.
> > My question was: suppose user would like to create a bonded device over 2 NICs.
> > One of them is ixgbe, while other would be some other type.
> > In future get_dev_info() for each of them might return DEV_RX_OFFLOAD_RESERVED_0  bit as set.
> > But it would mean completely different thing.
> > How bonded device would know that to deal properly?
> >
> > Another example - user has 2 NICs of different type and would like to send the same packet on both of them simultaneously.
> > As PKT_TX_RESERVED might mean different things for these devices, and user would like to use let say
> > PKT_TX_IXGBE_MACSEC on one of them, he would need to do a copy of them, instead just increment a refcnt.
> >
> > Similar issues might arise at RX handling: user got a packet with PKT_RX_RESERVED_0 set.
> > What does it really mean if there are different NIC types in the system?
> > The only way to answer that question, as I can see,  is to keep track from what NIC that packet was received.
> > Which I suppose, is not always convenient.
> >
> 
> The main purpose is to put the PMD-specific APIs in a separate
> namespace instead of mixing the PMD-specific APIs and global APIs
> up, and also save the bits in mbuf.ol_flags.
> 
> There are other ways to achieve this goal, such as introducing
> the PMD specific ol_flags in mbuf second cache line as you said.
> I just thought defining some reserved bits seems to be the most
> simple way which won't introduce many changes.
> 
> What's your suggestions? Should I just revert the changes and
> define the generic flags directly?

Yes, that would be my preference.
As I said above - spending extra bit in ol_flags  doesn't look like a big problem to me.
In return there would be no need to consider how to handle all that confusing scenarios in future.
Konstantin


> 
> Thanks & regards,
> Tiwei Bie

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-04 17:44                   ` Ananyev, Konstantin
@ 2017-01-04 23:56                     ` Tiwei Bie
  2017-01-05  8:33                       ` Adrien Mazarguil
  0 siblings, 1 reply; 79+ messages in thread
From: Tiwei Bie @ 2017-01-04 23:56 UTC (permalink / raw)
  To: Ananyev, Konstantin
  Cc: dev, adrien.mazarguil, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W

On Thu, Jan 05, 2017 at 01:44:18AM +0800, Ananyev, Konstantin wrote:
[...]
> > >
> > > I understand that.
> > > My question was: suppose user would like to create a bonded device over 2 NICs.
> > > One of them is ixgbe, while other would be some other type.
> > > In future get_dev_info() for each of them might return DEV_RX_OFFLOAD_RESERVED_0  bit as set.
> > > But it would mean completely different thing.
> > > How bonded device would know that to deal properly?
> > >
> > > Another example - user has 2 NICs of different type and would like to send the same packet on both of them simultaneously.
> > > As PKT_TX_RESERVED might mean different things for these devices, and user would like to use let say
> > > PKT_TX_IXGBE_MACSEC on one of them, he would need to do a copy of them, instead just increment a refcnt.
> > >
> > > Similar issues might arise at RX handling: user got a packet with PKT_RX_RESERVED_0 set.
> > > What does it really mean if there are different NIC types in the system?
> > > The only way to answer that question, as I can see,  is to keep track from what NIC that packet was received.
> > > Which I suppose, is not always convenient.
> > >
> > 
> > The main purpose is to put the PMD-specific APIs in a separate
> > namespace instead of mixing the PMD-specific APIs and global APIs
> > up, and also save the bits in mbuf.ol_flags.
> > 
> > There are other ways to achieve this goal, such as introducing
> > the PMD specific ol_flags in mbuf second cache line as you said.
> > I just thought defining some reserved bits seems to be the most
> > simple way which won't introduce many changes.
> > 
> > What's your suggestions? Should I just revert the changes and
> > define the generic flags directly?
> 
> Yes, that would be my preference.
> As I said above - spending extra bit in ol_flags  doesn't look like a big problem to me.
> In return there would be no need to consider how to handle all that confusing scenarios in future.

Okay. I'll update my patches. Thanks a lot for your comments.

Thanks & regards,
Tiwei Bie

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-04 23:56                     ` Tiwei Bie
@ 2017-01-05  8:33                       ` Adrien Mazarguil
  2017-01-05 10:05                         ` Tiwei Bie
  2017-01-05 11:32                         ` Ananyev, Konstantin
  0 siblings, 2 replies; 79+ messages in thread
From: Adrien Mazarguil @ 2017-01-05  8:33 UTC (permalink / raw)
  To: Tiwei Bie
  Cc: Ananyev, Konstantin, dev, Lu, Wenzhuo, Mcnamara, John,
	olivier.matz, thomas.monjalon, Zhang, Helin, Dai, Wei, Wang,
	Xiao W

On Thu, Jan 05, 2017 at 07:56:08AM +0800, Tiwei Bie wrote:
> On Thu, Jan 05, 2017 at 01:44:18AM +0800, Ananyev, Konstantin wrote:
> [...]
> > > >
> > > > I understand that.
> > > > My question was: suppose user would like to create a bonded device over 2 NICs.
> > > > One of them is ixgbe, while other would be some other type.
> > > > In future get_dev_info() for each of them might return DEV_RX_OFFLOAD_RESERVED_0  bit as set.
> > > > But it would mean completely different thing.
> > > > How bonded device would know that to deal properly?
> > > >
> > > > Another example - user has 2 NICs of different type and would like to send the same packet on both of them simultaneously.
> > > > As PKT_TX_RESERVED might mean different things for these devices, and user would like to use let say
> > > > PKT_TX_IXGBE_MACSEC on one of them, he would need to do a copy of them, instead just increment a refcnt.
> > > >
> > > > Similar issues might arise at RX handling: user got a packet with PKT_RX_RESERVED_0 set.
> > > > What does it really mean if there are different NIC types in the system?
> > > > The only way to answer that question, as I can see,  is to keep track from what NIC that packet was received.
> > > > Which I suppose, is not always convenient.
> > > >
> > > 
> > > The main purpose is to put the PMD-specific APIs in a separate
> > > namespace instead of mixing the PMD-specific APIs and global APIs
> > > up, and also save the bits in mbuf.ol_flags.
> > > 
> > > There are other ways to achieve this goal, such as introducing
> > > the PMD specific ol_flags in mbuf second cache line as you said.
> > > I just thought defining some reserved bits seems to be the most
> > > simple way which won't introduce many changes.
> > > 
> > > What's your suggestions? Should I just revert the changes and
> > > define the generic flags directly?
> > 
> > Yes, that would be my preference.
> > As I said above - spending extra bit in ol_flags  doesn't look like a big problem to me.
> > In return there would be no need to consider how to handle all that confusing scenarios in future.
> 
> Okay. I'll update my patches. Thanks a lot for your comments.

Well, I do not agree with Konstantin (no one saw this coming eh?) and do not
think you need to update your series again.

PMD-specific symbols have nothing to do in the global namespace in my
opinion, they are not versioned and may evolve without notice. Neither
applications nor the bonding PMD can rely on them. That's the trade-off.

Therefore until APIs are made global, the safe compromise is to define
neutral, reserved symbols that any PMD can use to implement their own
temporary APIs for testing purposes. These can be renamed later without
changing their value as long as a single PMD uses them.

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-05  8:33                       ` Adrien Mazarguil
@ 2017-01-05 10:05                         ` Tiwei Bie
  2017-01-05 11:32                         ` Ananyev, Konstantin
  1 sibling, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-05 10:05 UTC (permalink / raw)
  To: thomas.monjalon, olivier.matz
  Cc: Adrien Mazarguil, Ananyev, Konstantin, dev, Lu, Wenzhuo,
	Mcnamara, John, Zhang, Helin, Dai, Wei, Wang, Xiao W

On Thu, Jan 05, 2017 at 09:33:22AM +0100, Adrien Mazarguil wrote:
> On Thu, Jan 05, 2017 at 07:56:08AM +0800, Tiwei Bie wrote:
> > On Thu, Jan 05, 2017 at 01:44:18AM +0800, Ananyev, Konstantin wrote:
> > [...]
> > > > >
> > > > > I understand that.
> > > > > My question was: suppose user would like to create a bonded device over 2 NICs.
> > > > > One of them is ixgbe, while other would be some other type.
> > > > > In future get_dev_info() for each of them might return DEV_RX_OFFLOAD_RESERVED_0  bit as set.
> > > > > But it would mean completely different thing.
> > > > > How bonded device would know that to deal properly?
> > > > >
> > > > > Another example - user has 2 NICs of different type and would like to send the same packet on both of them simultaneously.
> > > > > As PKT_TX_RESERVED might mean different things for these devices, and user would like to use let say
> > > > > PKT_TX_IXGBE_MACSEC on one of them, he would need to do a copy of them, instead just increment a refcnt.
> > > > >
> > > > > Similar issues might arise at RX handling: user got a packet with PKT_RX_RESERVED_0 set.
> > > > > What does it really mean if there are different NIC types in the system?
> > > > > The only way to answer that question, as I can see,  is to keep track from what NIC that packet was received.
> > > > > Which I suppose, is not always convenient.
> > > > >
> > > > 
> > > > The main purpose is to put the PMD-specific APIs in a separate
> > > > namespace instead of mixing the PMD-specific APIs and global APIs
> > > > up, and also save the bits in mbuf.ol_flags.
> > > > 
> > > > There are other ways to achieve this goal, such as introducing
> > > > the PMD specific ol_flags in mbuf second cache line as you said.
> > > > I just thought defining some reserved bits seems to be the most
> > > > simple way which won't introduce many changes.
> > > > 
> > > > What's your suggestions? Should I just revert the changes and
> > > > define the generic flags directly?
> > > 
> > > Yes, that would be my preference.
> > > As I said above - spending extra bit in ol_flags  doesn't look like a big problem to me.
> > > In return there would be no need to consider how to handle all that confusing scenarios in future.
> > 
> > Okay. I'll update my patches. Thanks a lot for your comments.
> 
> Well, I do not agree with Konstantin (no one saw this coming eh?) and do not
> think you need to update your series again.
> 

Hi Adrien,

Thank you very much! :-)

Hi Thomas and Olivier,

I don't have strong opinions here, although I prefer to put
the PMD-specific APIs in a separate namespace. I'd like to
hear or follow your opinions since you are the maintainers
of ethdev and mbuf.

Best regards,
Tiwei Bie

> PMD-specific symbols have nothing to do in the global namespace in my
> opinion, they are not versioned and may evolve without notice. Neither
> applications nor the bonding PMD can rely on them. That's the trade-off.
> 
> Therefore until APIs are made global, the safe compromise is to define
> neutral, reserved symbols that any PMD can use to implement their own
> temporary APIs for testing purposes. These can be renamed later without
> changing their value as long as a single PMD uses them.
> 
> -- 
> Adrien Mazarguil
> 6WIND

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-05  8:33                       ` Adrien Mazarguil
  2017-01-05 10:05                         ` Tiwei Bie
@ 2017-01-05 11:32                         ` Ananyev, Konstantin
  2017-01-05 18:21                           ` Adrien Mazarguil
  1 sibling, 1 reply; 79+ messages in thread
From: Ananyev, Konstantin @ 2017-01-05 11:32 UTC (permalink / raw)
  To: Adrien Mazarguil, Bie, Tiwei
  Cc: dev, Lu, Wenzhuo, Mcnamara, John, olivier.matz, thomas.monjalon,
	Zhang, Helin, Dai, Wei, Wang, Xiao W

Hi Adrien,

> 
> On Thu, Jan 05, 2017 at 07:56:08AM +0800, Tiwei Bie wrote:
> > On Thu, Jan 05, 2017 at 01:44:18AM +0800, Ananyev, Konstantin wrote:
> > [...]
> > > > >
> > > > > I understand that.
> > > > > My question was: suppose user would like to create a bonded device over 2 NICs.
> > > > > One of them is ixgbe, while other would be some other type.
> > > > > In future get_dev_info() for each of them might return DEV_RX_OFFLOAD_RESERVED_0  bit as set.
> > > > > But it would mean completely different thing.
> > > > > How bonded device would know that to deal properly?
> > > > >
> > > > > Another example - user has 2 NICs of different type and would like to send the same packet on both of them simultaneously.
> > > > > As PKT_TX_RESERVED might mean different things for these devices, and user would like to use let say
> > > > > PKT_TX_IXGBE_MACSEC on one of them, he would need to do a copy of them, instead just increment a refcnt.
> > > > >
> > > > > Similar issues might arise at RX handling: user got a packet with PKT_RX_RESERVED_0 set.
> > > > > What does it really mean if there are different NIC types in the system?
> > > > > The only way to answer that question, as I can see,  is to keep track from what NIC that packet was received.
> > > > > Which I suppose, is not always convenient.
> > > > >
> > > >
> > > > The main purpose is to put the PMD-specific APIs in a separate
> > > > namespace instead of mixing the PMD-specific APIs and global APIs
> > > > up, and also save the bits in mbuf.ol_flags.
> > > >
> > > > There are other ways to achieve this goal, such as introducing
> > > > the PMD specific ol_flags in mbuf second cache line as you said.
> > > > I just thought defining some reserved bits seems to be the most
> > > > simple way which won't introduce many changes.
> > > >
> > > > What's your suggestions? Should I just revert the changes and
> > > > define the generic flags directly?
> > >
> > > Yes, that would be my preference.
> > > As I said above - spending extra bit in ol_flags  doesn't look like a big problem to me.
> > > In return there would be no need to consider how to handle all that confusing scenarios in future.
> >
> > Okay. I'll update my patches. Thanks a lot for your comments.
> 
> Well, I do not agree with Konstantin (no one saw this coming eh?)

:)

> and do not think you need to update your series again.
> 
> PMD-specific symbols have nothing to do in the global namespace in my
> opinion, they are not versioned and may evolve without notice. Neither
> applications nor the bonding PMD can rely on them. That's the trade-off.

Not sure I do understand your reasoning.
For me MACSEC offload is just one of many HW offloads that we support
and should be treated in exactly the same way.
Applications should be able to use it in a transparent and reliable way,
not only under some limited conditions. 
Otherwise what is the point to introduce it at all?
Yes, right now it is supported only by the ixgbe PMD, but why should that be the
reason to treat it as a second-class citizen?
Let's say PKT_TX_TUNNEL_* offloads also are supported only by one PMD right now.

> 
> Therefore until APIs are made global, the safe compromise is to define
> neutral, reserved symbols that any PMD can use to implement their own
> temporary APIs for testing purposes. These can be renamed later without
> changing their value as long as a single PMD uses them.

Ok, so what will we gain by introducing PKT_TX_RESERVED instead of PKT_TX_MACSEC?
As I said in my previous mail, the redefinition of the same ol_flag bit (and dev capabilities)
by different PMDs might create a lot of confusion in the future.
Is the potential saving of 1 bit really worth it?
 
Konstantin

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-05 11:32                         ` Ananyev, Konstantin
@ 2017-01-05 18:21                           ` Adrien Mazarguil
  2017-01-08 12:39                             ` Ananyev, Konstantin
  0 siblings, 1 reply; 79+ messages in thread
From: Adrien Mazarguil @ 2017-01-05 18:21 UTC (permalink / raw)
  To: Ananyev, Konstantin
  Cc: Bie, Tiwei, dev, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W

Hi Konstantin,

On Thu, Jan 05, 2017 at 11:32:38AM +0000, Ananyev, Konstantin wrote:
> Hi Adrien,
> 
> > 
> > On Thu, Jan 05, 2017 at 07:56:08AM +0800, Tiwei Bie wrote:
> > > On Thu, Jan 05, 2017 at 01:44:18AM +0800, Ananyev, Konstantin wrote:
> > > [...]
> > > > > >
> > > > > > I understand that.
> > > > > > My question was: suppose user would like to create a bonded device over 2 NICs.
> > > > > > One of them is ixgbe, while other would be some other type.
> > > > > > In future get_dev_info() for each of them might return DEV_RX_OFFLOAD_RESERVED_0  bit as set.
> > > > > > But it would mean completely different thing.
> > > > > > How bonded device would know that to deal properly?
> > > > > >
> > > > > > Another example - user has 2 NICs of different type and would like to send the same packet on both of them simultaneously.
> > > > > > As PKT_TX_RESERVED might mean different things for these devices, and user would like to use let say
> > > > > > PKT_TX_IXGBE_MACSEC on one of them, he would need to do a copy of them, instead just increment a refcnt.
> > > > > >
> > > > > > Similar issues might arise at RX handling: user got a packet with PKT_RX_RESERVED_0 set.
> > > > > > What does it really mean if there are different NIC types in the system?
> > > > > > The only way to answer that question, as I can see,  is to keep track from what NIC that packet was received.
> > > > > > Which I suppose, is not always convenient.
> > > > > >
> > > > >
> > > > > The main purpose is to put the PMD-specific APIs in a separate
> > > > > namespace instead of mixing the PMD-specific APIs and global APIs
> > > > > up, and also save the bits in mbuf.ol_flags.
> > > > >
> > > > > There are other ways to achieve this goal, such as introducing
> > > > > the PMD specific ol_flags in mbuf second cache line as you said.
> > > > > I just thought defining some reserved bits seems to be the most
> > > > > simple way which won't introduce many changes.
> > > > >
> > > > > What's your suggestions? Should I just revert the changes and
> > > > > define the generic flags directly?
> > > >
> > > > Yes, that would be my preference.
> > > > As I said above - spending extra bit in ol_flags  doesn't look like a big problem to me.
> > > > In return there would be no need to consider how to handle all that confusing scenarios in future.
> > >
> > > Okay. I'll update my patches. Thanks a lot for your comments.
> > 
> > Well, I do not agree with Konstantin (no one saw this coming eh?)
> 
> :)
> 
>  >and do not think you need to update your series again.
> > 
> > PMD-specific symbols have nothing to do in the global namespace in my
> > opinion, they are not versioned and may evolve without notice. Neither
> > applications nor the bonding PMD can rely on them. That's the trade-off.
> 
> Not sure I do understand your reasoning.
> For me MACSEC offload is just one of many HW offloads that we support
> and should be treated in exactly the same way.
> Applications should be able to use it in a transparent and reliable way,
> not only under some limited conditions. 
> Otherwise what is the point to introduce it at all?

Well my first reply to this thread was asking why isn't the whole API global
from the start then?

Given there are valid reasons for it not to and no plan to make it so in the
near future, applications must be aware that they are including
rte_pmd_ixgbe.h to use it. That in itself is a limiting condition, right?

> Yes, right now it is supported only by ixgbe PMD, but why that should be the
> reason to treat is as second-class citizen?
> Let say PKT_TX_TUNNEL_* offloads also are supported only by one PMD right now.

You are right about PKT_TX_TUNNEL_*, however these flags exist on their own
and are not tied to any API function calls, unlike in this series where
PKT_TX_MACSEC can only be used if the DEV_TX_OFFLOAD_MACSEC_INSERT
capability is present and the whole thing was configured through
rte_pmd_ixgbe_macsec_*() calls after including rte_pmd_ixgbe.h.

To be clear it is not about MACsec per se (as a standardized protocol, I
think related definitions for offloads have their place), but it has to do
with the fact that the rest of the API is PMD-specific and there is a
dependency between them.

> > Therefore until APIs are made global, the safe compromise is to define
> > neutral, reserved symbols that any PMD can use to implement their own
> > temporary APIs for testing purposes. These can be renamed later without
> > changing their value as long as a single PMD uses them.
> 
> Ok, so what we'll gain by introducing PKT_TX_RESERVED instead of PKT_TX_MACSEC?
> As I said in my previous mail the redefinition for the same ol_flag bit (and dev capabilities)
> by different PMD might create a lot of confusion in future.
> Does the potential saving of 1 bit really worth it?

That is one benefit, but my point is mainly to keep applications aware that
they are using an API defined by a single PMD, which may be temporary and
whose symbols are not versioned.

Consider this:

rte_mbuf.h:

 #define PKT_TX_RESERVED_0 (1 << 42)

rte_pmd_ixgbe.h:

 #define PKT_TX_MACSEC PKT_TX_RESERVED_0

That way, applications have to get the PKT_TX_MACSEC definition where the
rest of the API is also defined.

Other PMDs may reuse PKT_TX_RESERVED_0 and other reserved flags to implement
their own experimental APIs.

Applications and the bonding PMD can easily be made aware that such reserved
flags cannot be shared between ports unless they know what the underlying
PMD is, which is already a requirement to use this API in the first place
(for instance, calling rte_pmd_ixgbe_macsec_*() functions with another
vendor's port_id may crash the application).

So the idea if/when the API is made global is to rename PKT_TX_RESERVED_0 to
PKT_TX_MACSEC and keep its original value.

If other PMDs also implemented PKT_TX_RESERVED_0 in the meantime, it is
redefined using a different value. If there is no room left to do so, these
PMDs are out of luck I guess, and their specific API is disabled/removed
until something gets re-designed.

How about this?

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-05 18:21                           ` Adrien Mazarguil
@ 2017-01-08 12:39                             ` Ananyev, Konstantin
  2017-01-09  3:57                               ` Tiwei Bie
  2017-01-09 14:41                               ` Adrien Mazarguil
  0 siblings, 2 replies; 79+ messages in thread
From: Ananyev, Konstantin @ 2017-01-08 12:39 UTC (permalink / raw)
  To: Adrien Mazarguil
  Cc: Bie, Tiwei, dev, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W


Hi Adrien,

> 
> Hi Konstantin,
> 
> On Thu, Jan 05, 2017 at 11:32:38AM +0000, Ananyev, Konstantin wrote:
> > Hi Adrien,
> >
> > >
> > > On Thu, Jan 05, 2017 at 07:56:08AM +0800, Tiwei Bie wrote:
> > > > On Thu, Jan 05, 2017 at 01:44:18AM +0800, Ananyev, Konstantin wrote:
> > > > [...]
> > > > > > >
> > > > > > > I understand that.
> > > > > > > My question was: suppose user would like to create a bonded device over 2 NICs.
> > > > > > > One of them is ixgbe, while other would be some other type.
> > > > > > > In future get_dev_info() for each of them might return DEV_RX_OFFLOAD_RESERVED_0  bit as set.
> > > > > > > But it would mean completely different thing.
> > > > > > > How bonded device would know that to deal properly?
> > > > > > >
> > > > > > > Another example - user has 2 NICs of different type and would like to send the same packet on both of them simultaneously.
> > > > > > > As PKT_TX_RESERVED might mean different things for these devices, and user would like to use let say
> > > > > > > PKT_TX_IXGBE_MACSEC on one of them, he would need to do a copy of them, instead just increment a refcnt.
> > > > > > >
> > > > > > > Similar issues might arise at RX handling: user got a packet with PKT_RX_RESERVED_0 set.
> > > > > > > What does it really mean if there are different NIC types in the system?
> > > > > > > The only way to answer that question, as I can see,  is to keep track from what NIC that packet was received.
> > > > > > > Which I suppose, is not always convenient.
> > > > > > >
> > > > > >
> > > > > > The main purpose is to put the PMD-specific APIs in a separate
> > > > > > namespace instead of mixing the PMD-specific APIs and global APIs
> > > > > > up, and also save the bits in mbuf.ol_flags.
> > > > > >
> > > > > > There are other ways to achieve this goal, such as introducing
> > > > > > the PMD specific ol_flags in mbuf second cache line as you said.
> > > > > > I just thought defining some reserved bits seems to be the most
> > > > > > simple way which won't introduce many changes.
> > > > > >
> > > > > > What's your suggestions? Should I just revert the changes and
> > > > > > define the generic flags directly?
> > > > >
> > > > > Yes, that would be my preference.
> > > > > As I said above - spending extra bit in ol_flags  doesn't look like a big problem to me.
> > > > > In return there would be no need to consider how to handle all that confusing scenarios in future.
> > > >
> > > > Okay. I'll update my patches. Thanks a lot for your comments.
> > >
> > > Well, I do not agree with Konstantin (no one saw this coming eh?)
> >
> > :)
> >
> >  >and do not think you need to update your series again.
> > >
> > > PMD-specific symbols have nothing to do in the global namespace in my
> > > opinion, they are not versioned and may evolve without notice. Neither
> > > applications nor the bonding PMD can rely on them. That's the trade-off.
> >
> > Not sure I do understand your reasoning.
> > For me MACSEC offload is just one of many HW offloads that we support
> > and should be treated in exactly the same way.
> > Applications should be able to use it in a transparent and reliable way,
> > not only under some limited conditions.
> > Otherwise what is the point to introduce it at all?
> 
> Well my first reply to this thread was asking why isn't the whole API global
> from the start then?

That's a good question, and my preference would always be to have the
API to configure this feature as a generic one.
I guess the main reason why it is not is that we haven't reached an agreement
on how this API should look:
http://dpdk.org/ml/archives/dev/2016-September/047810.html
But I'll leave it to the author to provide the real reason here.

> 
> Given there are valid reasons for it not to and no plan to make it so in the
> near future, applications must be aware that they are including
> rte_pmd_ixgbe.h to use it. That in itself is a limiting condition, right?

Yes, it is definitely a limiting factor.
Though even if the API to configure the device to use MACsec is PMD-specific right now,
the API to query that capability and the API to use it at the datapath (mbuf.ol_flags) still
can be (and I think should be) device independent and transparent to use.

> 
> > Yes, right now it is supported only by ixgbe PMD, but why that should be the
> > reason to treat is as second-class citizen?
> > Let say PKT_TX_TUNNEL_* offloads also are supported only by one PMD right now.
> 
> You are right about PKT_TX_TUNNEL_*, however these flags exist on their own
> and are not tied to any API function calls, unlike in this series where
> PKT_TX_MACSEC can only be used if the DEV_TX_OFFLOAD_MACSEC_INSERT
> capability is present 

I don't think PKT_TX_TUNNEL_* 'exists on its own'.
To use it, a well-behaving app has to:
1) Query that the device does provide that capability: DEV_TX_OFFLOAD_*_TNL_TSO
2) configure the PMD (& device) to use that capability
3) use that offload in the run-time TX code (mb->ol_flags |= ...; mb->tx_offload = ...)

For PKT_TX_TUNNEL_*, 2) is pretty simple - the user just needs to make sure
that the full-featured TX function will be selected:
txconf.txq_flags = 0; ...;  rte_eth_tx_queue_setup(..., &txconf);

For TX_MACSEC, as I understand, 2) will be more complicated and
right now is PMD specific, but anyway the main pattern remains the same.
So at least 1) and 3) could be kept device neutral.
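
For illustration only - a minimal sketch of 1) and 3) for MACSEC, using the
flag and capability names proposed in this series; step 2), the PMD-specific
configuration, is left out since that is the part under discussion:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* sketch only: check the capability once, mark packets at run-time */
static int
app_macsec_tx(uint8_t port_id, struct rte_mbuf *mb)
{
	struct rte_eth_dev_info dev_info;

	/* 1) query that the device provides the capability */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT))
		return -1;

	/* 2) the PMD/device specific configuration happens elsewhere */

	/* 3) request the offload for this packet */
	mb->ol_flags |= PKT_TX_MACSEC;
	return 0;
}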

>and the whole thing was configured through
> rte_pmd_ixgbe_macsec_*() calls after including rte_pmd_ixgbe.h.
> 
> To be clear it is not about MACsec per se (as a standardized protocol, I
> think related definitions for offloads have their place), but it has to do
> with the fact that the rest of the API is PMD-specific and there is a
> dependency between them.
> 
> > > Therefore until APIs are made global, the safe compromise is to define
> > > neutral, reserved symbols that any PMD can use to implement their own
> > > temporary APIs for testing purposes. These can be renamed later without
> > > changing their value as long as a single PMD uses them.
> >
> > Ok, so what we'll gain by introducing PKT_TX_RESERVED instead of PKT_TX_MACSEC?
> > As I said in my previous mail the redefinition for the same ol_flag bit (and dev capabilities)
> > by different PMD might create a lot of confusion in future.
> > Does the potential saving of 1 bit really worth it?
> 
> That is one benefit, but my point is mainly to keep applications aware that
> they are using an API defined by a single PMD, which may be temporary and
> whose symbols are not versioned.

As applications have to use PMD-specific functions to configure it, they definitely are aware.

> 
> Consider this:
> 
> rte_mbuf.h:
> 
>  #define PKT_TX_RESERVED_0 (1 << 42)
> 
> rte_pmd_ixgbe.h:
> 
>  #define PKT_TX_MACSEC PKT_TX_RESERVED_0
> 
> That way, applications have to get the PKT_TX_MACSEC definition where the
> rest of the API is also defined.
> 
> Other PMDs may reuse PKT_TX_RESERVED_0 and other reserved flags to implement
> their own experimental APIs.

That's the main thing I am opposed to.
I think that by allowing PMDs to redefine the meaning of
mbuf.ol_flags and dev_info.(rx|tx)_offload_capa
we are just asking for trouble.

Let's say tomorrow i40e will redefine DEV_TX_OFFLOAD_RESERVED_0 and PKT_TX_RESERVED_0
to implement a new specific TX offload (PKT_TX_FEATURE_X).
Now let's say we have an application that works over both ixgbe and i40e
and would like to use both TX_MACSEC and TX_FEATURE_X offloads whenever they are available.
As I can see, with the approach you proposed, the only way for the application to make it work
is to support 2 different TX code paths (or at least some parts of them).
To me that looks inconvenient to the users and a source of future troubles.

Same for RX:  somewhere at upper layer user got a packet with PKT_RX_RESERVED_0 set.
What does it really mean if there are different NIC types in the system?

> 
> Applications and the bonding PMD can easily be made aware that such reserved
> flags cannot be shared between ports unless they know what the underlying
> PMD is, which is already a requirement to use this API in the first place
> (for instance, calling rte_pmd_ixgbe_macsec_*() functions with another
> vendor's port_id may crash the application).

I am talking about that code:
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
...
/* Take the first dev's offload capabilities */
internals->rx_offload_capa = dev_info.rx_offload_capa;
internals->tx_offload_capa = dev_info.tx_offload_capa;
...
internals->rx_offload_capa &= dev_info.rx_offload_capa;
internals->tx_offload_capa &= dev_info.tx_offload_capa;

Obviously, with what you are suggesting it is not valid any more.
The bonded device would need to maintain a MASK of all device-reserved offloads to exclude
them from the common subset.
Any user app(/lib) that does a similar thing would also have to be changed.
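
Just to illustrate - with reserved bits the intersection above would have to
become something like this (the mask name is hypothetical):

internals->tx_offload_capa &= dev_info.tx_offload_capa &
			      ~DEV_TX_OFFLOAD_RESERVED_MASK;

and every other consumer of (rx|tx)_offload_capa that compares capabilities
across ports would need the same special casing.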

> 
> So the idea if/when the API is made global is to rename PKT_TX_RESERVED_0 to
> PKT_TX_MACSEC and keep its original value.
> 
> If other PMDs also implemented PKT_TX_RESERVED_0 in the meantime, it is
> redefined using a different value. If there is no room left to do so, these
> PMDs are out of luck I guess, and their specific API is disabled/removed
> until something gets re-designed.
> 
> How about this?

I still think that we shouldn't allow PMDs to redefine mbuf.ol_flags and
dev_info.(rx|tx)_offload_capa.
See above for my reasons.
Konstantin

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-08 12:39                             ` Ananyev, Konstantin
@ 2017-01-09  3:57                               ` Tiwei Bie
  2017-01-09 11:26                                 ` Thomas Monjalon
  2017-01-09 14:41                               ` Adrien Mazarguil
  1 sibling, 1 reply; 79+ messages in thread
From: Tiwei Bie @ 2017-01-09  3:57 UTC (permalink / raw)
  To: Ananyev, Konstantin
  Cc: Adrien Mazarguil, dev, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W

On Sun, Jan 08, 2017 at 08:39:55PM +0800, Ananyev, Konstantin wrote:
> Hi Adrien,
> 
> > 
> > Hi Konstantin,
> > 
> > On Thu, Jan 05, 2017 at 11:32:38AM +0000, Ananyev, Konstantin wrote:
> > > Hi Adrien,
> > >
> > > >
> > > > On Thu, Jan 05, 2017 at 07:56:08AM +0800, Tiwei Bie wrote:
> > > > > On Thu, Jan 05, 2017 at 01:44:18AM +0800, Ananyev, Konstantin wrote:
[...]
> > Well my first reply to this thread was asking why isn't the whole API global
> > from the start then?
> 
> That's good question, and my preference would always be to have the
> API to configure this feature as generic one.
> I guess the main reason why it is not right now we don't reach an agreement
> how this API should look like: 
> http://dpdk.org/ml/archives/dev/2016-September/047810.html
> But I'll leave it to the author to provide the real reason here. 
> 

Yes, currently this work just provides a thin layer over 82599's
hardware MACsec offload support to allow users to configure 82599's
MACsec offload engine. The current API may be too specific and may
need a rework to be used with other NICs.

> > 
> > Given there are valid reasons for it not to and no plan to make it so in the
> > near future, applications must be aware that they are including
> > rte_pmd_ixgbe.h to use it. That in itself is a limiting condition, right?
> 
> Yes, it is definitely a limiting factor.
> Though even if API to configure device to use macsec would be PMD specific right now,
> The API to query that capability and the API to use it at datapath (mbuf.ol_flags) still
> can be (and I think should be) device independent and transparent to use.  
> 
> > 
> > > Yes, right now it is supported only by ixgbe PMD, but why that should be the
> > > reason to treat is as second-class citizen?
> > > Let say PKT_TX_TUNNEL_* offloads also are supported only by one PMD right now.
> > 
> > You are right about PKT_TX_TUNNEL_*, however these flags exist on their own
> > and are not tied to any API function calls, unlike in this series where
> > PKT_TX_MACSEC can only be used if the DEV_TX_OFFLOAD_MACSEC_INSERT
> > capability is present 
> 
> I don't think PKT_TX_TUNNEL_* 'exists on its own'.
> To use it well behaving app have to:
> 1) Query that device does provide that capability: DEV_TX_OFFLOAD_*_TNL_TSO
> 2) configure PMD( & device) to use that capability
> 3) use that offload at run-time TX code (mb->ol_flags |= ...; mb->tx_offload = ...)
> 
> For PKT_TX_TUNNEL_*  2) is pretty simple - user just need to make sure
> that full-featured TX function will be selected:
> txconf.txq_flags = 0; ...;  rte_eth_tx_queue_setup(..., &txconf);
>  
> For TX_MACSEC, as I understand 2) will be more complicated and
> right now is PMD specific, but anyway the main pattern remains the same.
> So at least 1) and 3) could be kept device neutral.
> 

Yes, the PMD-specific APIs focus on the 2nd step:

2) configure the PMD (& device) to use that capability

The other parts (querying the capabilities, using the offload
in the datapath, registering the callback, getting the statistics
via xstats) are done in a generic way.
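
For example - just a rough sketch of the generic parts besides the Tx flag
itself; the handler and function names below are only illustrative:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* application's handler for the new MACsec event */
static void
macsec_event_cb(uint8_t port_id, enum rte_eth_event_type type, void *arg)
{
	RTE_SET_USED(arg);
	if (type == RTE_ETH_EVENT_MACSEC)
		printf("port %d: MACsec PN exhaustion threshold reached\n",
		       port_id);
}

static void
app_macsec_monitor(uint8_t port_id)
{
	struct rte_eth_xstat xstats[256];

	/* event notification uses the generic callback API */
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_MACSEC,
				      macsec_event_cb, NULL);

	/* the MACsec counters show up as regular xstats */
	rte_eth_xstats_get(port_id, xstats, RTE_DIM(xstats));
}

Only the configuration of the MACsec engine itself goes through the
rte_pmd_ixgbe_macsec_*() calls.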

Best regards,
Tiwei Bie

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-09  3:57                               ` Tiwei Bie
@ 2017-01-09 11:26                                 ` Thomas Monjalon
  2017-01-10  2:08                                   ` Tiwei Bie
  2017-01-11 17:32                                   ` Olivier MATZ
  0 siblings, 2 replies; 79+ messages in thread
From: Thomas Monjalon @ 2017-01-09 11:26 UTC (permalink / raw)
  To: Tiwei Bie, Ananyev, Konstantin, Adrien Mazarguil
  Cc: dev, Lu, Wenzhuo, Mcnamara, John, olivier.matz, Zhang, Helin,
	Dai, Wei, Wang, Xiao W

2017-01-09 11:57, Tiwei Bie:
> On Sun, Jan 08, 2017 at 08:39:55PM +0800, Ananyev, Konstantin wrote:
> > > Well my first reply to this thread was asking why isn't the whole API global
> > > from the start then?
> > 
> > That's good question, and my preference would always be to have the
> > API to configure this feature as generic one.
> > I guess the main reason why it is not right now we don't reach an agreement
> > how this API should look like: 
> > http://dpdk.org/ml/archives/dev/2016-September/047810.html
> > But I'll leave it to the author to provide the real reason here. 
> 
> Yes, currently this work just provided a thin layer over 82599's
> hardware MACsec offload support to allow users configure 82599's
> MACsec offload engine. The current API may be too specific and may
> need a rework to be used with other NICs.

I think it is a really good approach to start such an API privately in a driver.
It will give us more time and experience to design a proper generic API.

Regarding the mbuf flag, it looks straightforward, and as it is IEEE
standardized, I do not see any objection to adding it now.
However, I will wait for the approval of Olivier - as maintainer of mbuf.

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-08 12:39                             ` Ananyev, Konstantin
  2017-01-09  3:57                               ` Tiwei Bie
@ 2017-01-09 14:41                               ` Adrien Mazarguil
  1 sibling, 0 replies; 79+ messages in thread
From: Adrien Mazarguil @ 2017-01-09 14:41 UTC (permalink / raw)
  To: Ananyev, Konstantin
  Cc: Bie, Tiwei, dev, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W

Hi Konstantin,

On Sun, Jan 08, 2017 at 12:39:55PM +0000, Ananyev, Konstantin wrote:
> 
> Hi Adrien,
> 
> > 
> > Hi Konstantin,
> > 
> > On Thu, Jan 05, 2017 at 11:32:38AM +0000, Ananyev, Konstantin wrote:
> > > Hi Adrien,
[...]
> > > > PMD-specific symbols have nothing to do in the global namespace in my
> > > > opinion, they are not versioned and may evolve without notice. Neither
> > > > applications nor the bonding PMD can rely on them. That's the trade-off.
> > >
> > > Not sure I do understand your reasoning.
> > > For me MACSEC offload is just one of many HW offloads that we support
> > > and should be treated in exactly the same way.
> > > Applications should be able to use it in a transparent and reliable way,
> > > not only under some limited conditions.
> > > Otherwise what is the point to introduce it at all?
> > 
> > Well my first reply to this thread was asking why isn't the whole API global
> > from the start then?
> 
> That's good question, and my preference would always be to have the
> API to configure this feature as generic one.
> I guess the main reason why it is not right now we don't reach an agreement
> how this API should look like: 
> http://dpdk.org/ml/archives/dev/2016-September/047810.html
> But I'll leave it to the author to provide the real reason here. 
> 
> > Given there are valid reasons for it not to and no plan to make it so in the
> > near future, applications must be aware that they are including
> > rte_pmd_ixgbe.h to use it. That in itself is a limiting condition, right?
> 
> Yes, it is definitely a limiting factor.
> Though even if API to configure device to use macsec would be PMD specific right now,
> The API to query that capability and the API to use it at datapath (mbuf.ol_flags) still
> can be (and I think should be) device independent and transparent to use.  

With RESERVED flags, what is global and transparent is the fact that the mbuf or
device features in question are PMD-specific and that some extra knowledge about
the underlying port is necessary to handle them. I think that could be
useful to applications in order to take the necessary precautions.

> > > Yes, right now it is supported only by ixgbe PMD, but why that should be the
> > > reason to treat is as second-class citizen?
> > > Let say PKT_TX_TUNNEL_* offloads also are supported only by one PMD right now.
> > 
> > You are right about PKT_TX_TUNNEL_*, however these flags exist on their own
> > and are not tied to any API function calls, unlike in this series where
> > PKT_TX_MACSEC can only be used if the DEV_TX_OFFLOAD_MACSEC_INSERT
> > capability is present 
> 
> I don't think PKT_TX_TUNNEL_* 'exists on its own'.
> To use it well behaving app have to:
> 1) Query that device does provide that capability: DEV_TX_OFFLOAD_*_TNL_TSO
> 2) configure PMD( & device) to use that capability
> 3) use that offload at run-time TX code (mb->ol_flags |= ...; mb->tx_offload = ...)
> 
> For PKT_TX_TUNNEL_*  2) is pretty simple - user just need to make sure
> that full-featured TX function will be selected:
> txconf.txq_flags = 0; ...;  rte_eth_tx_queue_setup(..., &txconf);

Pretty much like any other offloads. Any PMD could implement them as well
without exposing additional callbacks.

> For TX_MACSEC, as I understand 2) will be more complicated and
> right now is PMD specific, but anyway the main pattern remains the same.
> So at least 1) and 3) could be kept device neutral.

Yes, however this discussion is precisely because I think it could be a
problem that this API "right now is PMD specific", particularly because it
will remain that way.

> >and the whole thing was configured through
> > rte_pmd_ixgbe_macsec_*() calls after including rte_pmd_ixgbe.h.
> > 
> > To be clear it is not about MACsec per se (as a standardized protocol, I
> > think related definitions for offloads have their place), but it has to do
> > with the fact that the rest of the API is PMD-specific and there is a
> > dependency between them.
> > 
> > > > Therefore until APIs are made global, the safe compromise is to define
> > > > neutral, reserved symbols that any PMD can use to implement their own
> > > > temporary APIs for testing purposes. These can be renamed later without
> > > > changing their value as long as a single PMD uses them.
> > >
> > > Ok, so what we'll gain by introducing PKT_TX_RESERVED instead of PKT_TX_MACSEC?
> > > As I said in my previous mail the redefinition for the same ol_flag bit (and dev capabilities)
> > > by different PMD might create a lot of confusion in future.
> > > Does the potential saving of 1 bit really worth it?
> > 
> > That is one benefit, but my point is mainly to keep applications aware that
> > they are using an API defined by a single PMD, which may be temporary and
> > whose symbols are not versioned.
> 
> As applications have to use PMD specific functions to configure it they definitely are aware.

Therefore they also know if they are running only on top of ixgbe adapters
or a mix with something else. By choosing the PMD-specific path, they know
what they are getting into.

> > Consider this:
> > 
> > rte_mbuf.h:
> > 
> >  #define PKT_TX_RESERVED_0 (1 << 42)
> > 
> > rte_pmd_ixgbe.h:
> > 
> >  #define PKT_TX_MACSEC PKT_TX_RESERVED_0
> > 
> > That way, applications have to get the PKT_TX_MACSEC definition where the
> > rest of the API is also defined.
> > 
> > Other PMDs may reuse PKT_TX_RESERVED_0 and other reserved flags to implement
> > their own experimental APIs.
> 
> That's the main thing I am opposed to.
> I think that by allowing PMD to redefine meaning of
> mbuf.ol_flags and dev_info.(rx| tx)_offload_capa 
> we just asking for trouble.
> 
> Let say tomorrow, i40e will redefine DEV_TX_OFFLOAD_RESERVED_0 and PKT_TX_RESERVED_0
> to implement new specific TX offload (PKT_TX_FEATURE_X).
> Now let say we have an application that works over both ixgbe and i40e
> and would like to use both TX_MACSEC and TC_FEATURE_X offloads whenever they are available.
> As I can see, with the approach you proposed the only way for the application to make it
> is to support 2 different TX code paths (or at least some parts of it).
> To me that way looks inconvenient to the users and source of future troubles.

Remember applications still have to clear PKT_TX_MACSEC and other flags on
ports that do not implement them, just like when the related offload is not
configured even if supported. Whichever approach is taken, applications have
to be careful and need a few extra checks in their TX path. There is no
extra cost involved compared to existing offloads.

> Same for RX:  somewhere at upper layer user got a packet with PKT_RX_RESERVED_0 set.
> What does it really mean if there are different NIC types in the system?

For RX, I agree things are more complicated: applications would have to know
what a given flag means on a particular port. We could define several
RESERVED_X flags that do not overlap, on a case-by-case basis, so at least
applications know they are dealing with PMD-specific flags.

> > Applications and the bonding PMD can easily be made aware that such reserved
> > flags cannot be shared between ports unless they know what the underlying
> > PMD is, which is already a requirement to use this API in the first place
> > (for instance, calling rte_pmd_ixgbe_macsec_*() functions with another
> > vendor's port_id may crash the application).
> 
> I am talking about that code:
> rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
> {
> ...
> /* Take the first dev's offload capabilities */
> internals->rx_offload_capa = dev_info.rx_offload_capa;
> internals->tx_offload_capa = dev_info.tx_offload_capa;
> ...
> internals->rx_offload_capa &= dev_info.rx_offload_capa;
> internals->tx_offload_capa &= dev_info.tx_offload_capa;
> 
> Obviously with what you are suggesting it is not valid any more.
> Bonded device need to support a MASK of all device reserved offloads to exclude
> them from common subset. 
> Any user app(/lib) that does similar thing would also have to be changed.

Why can't they clear RESERVED flags as well? We just have to document the
expected behavior. Even if the bonding PMD does not, applications that do
not use those flags (since they are reserved) should not be impacted.

Because the bonding PMD won't forward PMD-specific API calls, I think it's
safer to clear them anyway.

By the way, how do you handle the case where the bonding PMD exposes the
MACSEC feature and as a result the application tries to configure MACSEC
support on the bonding port_id? How is the resulting crash documented in a
generic fashion?

> > So the idea if/when the API is made global is to rename PKT_TX_RESERVED_0 to
> > PKT_TX_MACSEC and keep its original value.
> > 
> > If other PMDs also implemented PKT_TX_RESERVED_0 in the meantime, it is
> > redefined using a different value. If there is no room left to do so, these
> > PMDs are out of luck I guess, and their specific API is disabled/removed
> > until something gets re-designed.
> > 
> > How about this?
> 
> I still think that we shouldn't allow PMDs to redefine mbuf.olflags and
> dev_info.(rx|tx)_offload_capa. 
> See above for my reasons.

I'm still not comfortable with partially global/specific APIs such as this,
because we assume applications are fully aware of the underlying port in
order to use them, while at the same time the related flags are part of the
global namespace; I find it confusing.

If application developers have no problem with that (so far they haven't
commented on this topic, which means they likely agree to go with anything),
I have no reason left to go against the majority.

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-09 11:26                                 ` Thomas Monjalon
@ 2017-01-10  2:08                                   ` Tiwei Bie
  2017-01-11 17:32                                   ` Olivier MATZ
  1 sibling, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-10  2:08 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: Ananyev, Konstantin, Adrien Mazarguil, dev, Lu, Wenzhuo,
	Mcnamara, John, olivier.matz, Zhang, Helin, Dai, Wei, Wang,
	Xiao W

On Mon, Jan 09, 2017 at 12:26:53PM +0100, Thomas Monjalon wrote:
> 2017-01-09 11:57, Tiwei Bie:
> > On Sun, Jan 08, 2017 at 08:39:55PM +0800, Ananyev, Konstantin wrote:
> > > > Well my first reply to this thread was asking why isn't the whole API global
> > > > from the start then?
> > > 
> > > That's good question, and my preference would always be to have the
> > > API to configure this feature as generic one.
> > > I guess the main reason why it is not right now we don't reach an agreement
> > > how this API should look like: 
> > > http://dpdk.org/ml/archives/dev/2016-September/047810.html
> > > But I'll leave it to the author to provide the real reason here. 
> > 
> > Yes, currently this work just provided a thin layer over 82599's
> > hardware MACsec offload support to allow users configure 82599's
> > MACsec offload engine. The current API may be too specific and may
> > need a rework to be used with other NICs.
> 
> I think it is a really good approach to start such API privately in a driver.
> It will give us more time and experience to design a proper generic API.
> 
> Regarding the mbuf flag, it looks straight-forward, and as it is IEEE
> standardized, I do not see any objection to add it now.
> However, I will wait for the approval of Olivier - as maintainer of mbuf.
> 

I see. Thank you very much for your comments! :-)

Best regards,
Tiwei Bie

^ permalink raw reply	[flat|nested] 79+ messages in thread

* [PATCH v6 0/6] Add MACsec offload support for ixgbe
  2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
                           ` (8 preceding siblings ...)
  2017-01-04  8:29         ` [PATCH v5 0/8] Add MACsec offload support for ixgbe Peng, Yuan
@ 2017-01-11  4:31         ` Tiwei Bie
  2017-01-11  4:31           ` [PATCH v6 1/6] mbuf: add flag for MACsec Tiwei Bie
                             ` (6 more replies)
  9 siblings, 7 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-11  4:31 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

This patch set adds the MACsec offload support for ixgbe.
The testpmd is also updated to support MACsec cmds.

v2:
- Update the documents for testpmd;
- Update the release notes;
- Reuse the functions provided by base code;

v3:
- Add the missing parts of MACsec mbuf flag and reorganize the patch set;
- Add an ethdev event type for MACsec;
- Advertise the MACsec offload capabilities based on the mac type;
- Minor fixes and improvements;

v4:
- Reserve bits in mbuf and ethdev for PMD specific API;
- Use the reserved bits in PMD specific API;

v5:
- Add MACsec offload in the NIC feature list;
- Minor improvements on comments;

v6:
- Revert the changes related to the reserved flags;
- Rebase the patch set on the latest branch, xstats code is changed recently;
- Update the feature list when adding the MACsec support for ixgbe;

Tiwei Bie (6):
  mbuf: add flag for MACsec
  ethdev: add event type for MACsec
  ethdev: add MACsec offload capability flags
  net/ixgbe: add MACsec offload support
  app/testpmd: add MACsec offload commands
  doc: add ixgbe specific APIs

 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   2 +
 app/test-pmd/macswap.c                      |   2 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   2 +
 doc/guides/nics/features/default.ini        |   1 +
 doc/guides/nics/features/ixgbe.ini          |   1 +
 doc/guides/rel_notes/release_17_02.rst      |  10 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 ++
 drivers/net/ixgbe/ixgbe_ethdev.c            | 482 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 100 ++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 lib/librte_ether/rte_ethdev.h               |   3 +
 lib/librte_mbuf/rte_mbuf.c                  |   2 +
 lib/librte_mbuf/rte_mbuf.h                  |   6 +
 17 files changed, 1088 insertions(+), 5 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 79+ messages in thread

* [PATCH v6 1/6] mbuf: add flag for MACsec
  2017-01-11  4:31         ` [PATCH v6 0/6] " Tiwei Bie
@ 2017-01-11  4:31           ` Tiwei Bie
  2017-01-11 17:41             ` Olivier MATZ
  2017-01-11  4:31           ` [PATCH v6 2/6] ethdev: add event type " Tiwei Bie
                             ` (5 subsequent siblings)
  6 siblings, 1 reply; 79+ messages in thread
From: Tiwei Bie @ 2017-01-11  4:31 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Add a new Tx flag in mbuf, that can be set by applications to
enable the MACsec offload for a packet to be transmitted.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 doc/guides/rel_notes/release_17_02.rst | 5 +++++
 lib/librte_mbuf/rte_mbuf.c             | 2 ++
 lib/librte_mbuf/rte_mbuf.h             | 6 ++++++
 3 files changed, 13 insertions(+)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 180af82..3958714 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -52,6 +52,11 @@ New Features
   See the :ref:`Generic flow API <Generic_flow_API>` documentation for more
   information.
 
+* **Improved offload support in mbuf.**
+
+  Added a new Tx mbuf flag for MACsec, which can be set by applications
+  to enable the MACsec offload for the packets to be transmitted.
+
 
 Resolved Issues
 ---------------
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index 63f43c8..72ad91e 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -404,6 +404,7 @@ const char *rte_get_tx_ol_flag_name(uint64_t mask)
 	case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
 	case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
 	case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
+	case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
 	default: return NULL;
 	}
 }
@@ -434,6 +435,7 @@ rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
 		  "PKT_TX_TUNNEL_NONE" },
 		{ PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK,
 		  "PKT_TX_TUNNEL_NONE" },
+		{ PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
 	};
 	const char *name;
 	unsigned int i;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 4476d75..ffbb4b3 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -182,6 +182,12 @@ extern "C" {
 /* add new TX flags here */
 
 /**
+ * Offload the MACsec. This flag must be set by the application to enable
+ * this offload feature for a packet to be transmitted.
+ */
+#define PKT_TX_MACSEC        (1ULL << 44)
+
+/**
  * Bits 45:48 used for the tunnel type.
  * When doing Tx offload like TSO or checksum, the HW needs to configure the
  * tunnel type into the HW descriptors.
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v6 2/6] ethdev: add event type for MACsec
  2017-01-11  4:31         ` [PATCH v6 0/6] " Tiwei Bie
  2017-01-11  4:31           ` [PATCH v6 1/6] mbuf: add flag for MACsec Tiwei Bie
@ 2017-01-11  4:31           ` Tiwei Bie
  2017-01-11  4:31           ` [PATCH v6 3/6] ethdev: add MACsec offload capability flags Tiwei Bie
                             ` (4 subsequent siblings)
  6 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-11  4:31 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

This commit adds a below event type:

- RTE_ETH_EVENT_MACSEC

This event will occur when the PN counter in a MACsec connection
reaches the exhaustion threshold.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 1c356c1..d5d2956 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -3188,6 +3188,7 @@ enum rte_eth_event_type {
 	RTE_ETH_EVENT_INTR_RESET,
 			/**< reset interrupt event, sent to VF on PF reset */
 	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
+	RTE_ETH_EVENT_MACSEC,   /**< MACsec offload related event */
 	RTE_ETH_EVENT_MAX       /**< max value of this enum */
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v6 3/6] ethdev: add MACsec offload capability flags
  2017-01-11  4:31         ` [PATCH v6 0/6] " Tiwei Bie
  2017-01-11  4:31           ` [PATCH v6 1/6] mbuf: add flag for MACsec Tiwei Bie
  2017-01-11  4:31           ` [PATCH v6 2/6] ethdev: add event type " Tiwei Bie
@ 2017-01-11  4:31           ` Tiwei Bie
  2017-01-11  4:31           ` [PATCH v6 4/6] net/ixgbe: add MACsec offload support Tiwei Bie
                             ` (3 subsequent siblings)
  6 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-11  4:31 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

If these flags are advertised by a PMD, the NIC supports the MACsec
offload. The incoming MACsec traffics can be offloaded transparently
after the MACsec offload is configured correctly by the application.
And the application can set the PKT_TX_MACSEC flag in mbufs to enable
the MACsec offload for the packets to be transmitted.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index d5d2956..49c073b 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -881,6 +881,7 @@ struct rte_eth_conf {
 #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
 #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
 
 /**
  * TX offload capabilities of a device.
@@ -898,6 +899,7 @@ struct rte_eth_conf {
 #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
 
 /**
  * Ethernet device information
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v6 4/6] net/ixgbe: add MACsec offload support
  2017-01-11  4:31         ` [PATCH v6 0/6] " Tiwei Bie
                             ` (2 preceding siblings ...)
  2017-01-11  4:31           ` [PATCH v6 3/6] ethdev: add MACsec offload capability flags Tiwei Bie
@ 2017-01-11  4:31           ` Tiwei Bie
  2017-01-11  4:31           ` [PATCH v6 5/6] app/testpmd: add MACsec offload commands Tiwei Bie
                             ` (2 subsequent siblings)
  6 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-11  4:31 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

MACsec (or LinkSec, 802.1AE) is a MAC level encryption/authentication
scheme defined in IEEE 802.1AE that uses symmetric cryptography.
This commit adds the MACsec offload support for ixgbe.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 doc/guides/nics/features/default.ini        |   1 +
 doc/guides/nics/features/ixgbe.ini          |   1 +
 drivers/net/ixgbe/ixgbe_ethdev.c            | 482 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 100 ++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 7 files changed, 638 insertions(+), 5 deletions(-)

diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini
index f1bf9bf..c412d6f 100644
--- a/doc/guides/nics/features/default.ini
+++ b/doc/guides/nics/features/default.ini
@@ -43,6 +43,7 @@ VLAN offload         =
 QinQ offload         =
 L3 checksum offload  =
 L4 checksum offload  =
+MACsec offload       =
 Inner L3 checksum    =
 Inner L4 checksum    =
 Packet type parsing  =
diff --git a/doc/guides/nics/features/ixgbe.ini b/doc/guides/nics/features/ixgbe.ini
index 4a5667f..24c02fb 100644
--- a/doc/guides/nics/features/ixgbe.ini
+++ b/doc/guides/nics/features/ixgbe.ini
@@ -36,6 +36,7 @@ VLAN offload         = Y
 QinQ offload         = Y
 L3 checksum offload  = Y
 L4 checksum offload  = Y
+MACsec offload       = Y
 Inner L3 checksum    = Y
 Inner L4 checksum    = Y
 Packet type parsing  = Y
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 060772d..6aad7ef 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -231,6 +231,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 			uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
@@ -747,6 +748,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
 			   sizeof(rte_ixgbe_stats_strings[0]))
 
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_untagged)},
+	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_encrypted)},
+	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_protected)},
+	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_octets_encrypted)},
+	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+		out_octets_protected)},
+	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_untagged)},
+	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_badtag)},
+	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_nosci)},
+	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unknownsci)},
+	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+		in_octets_decrypted)},
+	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+		in_octets_validated)},
+	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unchecked)},
+	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_delayed)},
+	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_late)},
+	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_ok)},
+	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_invalid)},
+	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notvalid)},
+	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unusedsa)},
+	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+			   sizeof(rte_ixgbe_macsec_strings[0]))
+
 /* Per-queue statistics */
 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
 	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
@@ -2370,6 +2416,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		/* check if lsc interrupt is enabled */
 		if (dev->data->dev_conf.intr_conf.lsc != 0)
 			ixgbe_dev_lsc_interrupt_setup(dev);
+		ixgbe_dev_macsec_interrupt_setup(dev);
 	} else {
 		rte_intr_callback_unregister(intr_handle,
 					     ixgbe_dev_interrupt_handler, dev);
@@ -2560,6 +2607,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 static void
 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 			   struct ixgbe_hw_stats *hw_stats,
+			   struct ixgbe_macsec_stats *macsec_stats,
 			   uint64_t *total_missed_rx, uint64_t *total_qbrc,
 			   uint64_t *total_qprc, uint64_t *total_qprdc)
 {
@@ -2729,6 +2777,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 	/* Flow Director Stats registers */
 	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
 	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+
+	/* MACsec Stats registers */
+	macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+	macsec_stats->out_pkts_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+	macsec_stats->out_pkts_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+	macsec_stats->out_octets_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+	macsec_stats->out_octets_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+	macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+	macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+	macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+	macsec_stats->in_pkts_unknownsci +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+	macsec_stats->in_octets_decrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+	macsec_stats->in_octets_validated +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+	macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+	macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+	macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+	for (i = 0; i < 2; i++) {
+		macsec_stats->in_pkts_ok +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+		macsec_stats->in_pkts_invalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+		macsec_stats->in_pkts_notvalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+	}
+	macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+	macsec_stats->in_pkts_notusingsa +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
 }
 
 /*
@@ -2741,6 +2823,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i;
 
@@ -2749,8 +2834,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-			&total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	if (stats == NULL)
 		return;
@@ -2802,7 +2887,7 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 /* This function calculates the number of xstats based on the current config */
 static unsigned
 ixgbe_xstats_calc_num(void) {
-	return IXGBE_NB_HW_STATS +
+	return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
 		(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
 		(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
 }
@@ -2829,6 +2914,15 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 			count++;
 		}
 
+		/* MACsec Stats */
+		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"%s",
+				rte_ixgbe_macsec_strings[i].name);
+			count++;
+		}
+
 		/* RX Priority Stats */
 		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2878,6 +2972,9 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i, stat, count = 0;
 
@@ -2891,8 +2988,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-				   &total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	/* If this is a reset xstats is NULL, and we have cleared the
 	 * registers by reading them.
@@ -2909,6 +3006,14 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		count++;
 	}
 
+	/* MACsec Stats */
+	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+		xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
+				rte_ixgbe_macsec_strings[i].offset);
+		xstats[count].id = count;
+		count++;
+	}
+
 	/* RX Priority Stats */
 	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2938,6 +3043,9 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw_stats *stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 
 	unsigned count = ixgbe_xstats_calc_num();
 
@@ -2946,6 +3054,7 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 
 	/* Reset software totals */
 	memset(stats, 0, sizeof(*stats));
+	memset(macsec_stats, 0, sizeof(*macsec_stats));
 }
 
 static void
@@ -3078,6 +3187,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	    !RTE_ETH_DEV_SRIOV(dev).active)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
 
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3091,6 +3204,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_TX_OFFLOAD_SCTP_CKSUM  |
 		DEV_TX_OFFLOAD_TCP_TSO;
 
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3388,6 +3505,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	intr->mask |= IXGBE_EICR_LINKSEC;
+
+	return 0;
+}
+
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
  *
@@ -3422,6 +3561,9 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 	if (eicr & IXGBE_EICR_MAILBOX)
 		intr->flags |= IXGBE_FLAG_MAILBOX;
 
+	if (eicr & IXGBE_EICR_LINKSEC)
+		intr->flags |= IXGBE_FLAG_MACSEC;
+
 	if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
 	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
 	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
@@ -3576,6 +3718,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	}
 
+	if (intr->flags & IXGBE_FLAG_MACSEC) {
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
+					      NULL);
+		intr->flags &= ~IXGBE_FLAG_MACSEC;
+	}
+
 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
 	ixgbe_enable_intr(dev);
 	rte_intr_enable(intr_handle);
@@ -7616,6 +7764,330 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	ixgbevf_dev_interrupt_action(dev);
 }
 
+/**
+ *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the transmit data path and waits for the HW to internally empty
+ *  the Tx security block
+ **/
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECTX_POLL 40
+
+	int i;
+	int sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
+		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
+			break;
+		/* Use interrupt-safe sleep just in case */
+		usec_delay(1000);
+	}
+
+	/* For informational purposes only */
+	if (i >= IXGBE_MAX_SECTX_POLL)
+		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+			 "path fully disabled.  Continuing with init.\n");
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the transmit data path.
+ **/
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+	uint32_t sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * No ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for Tx in the base code, and the base
+	 * code must not be modified in DPDK, so just call
+	 * the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Enable Ethernet CRC (required by MACsec offload) */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+	/* Enable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+	ctrl |= 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+	/* Enable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+		     IXGBE_LSECTXCTRL_AUTH;
+	ctrl |= IXGBE_LSECTXCTRL_AISCI;
+	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+	if (rp)
+		ctrl |= IXGBE_LSECRXCTRL_RP;
+	else
+		ctrl &= ~IXGBE_LSECRXCTRL_RP;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * No ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for Tx in the base code, and the base
+	 * code must not be modified in DPDK, so just call
+	 * the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * No ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for Tx in the base code, and the base
+	 * code must not be modified in DPDK, so just call
+	 * the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Disable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Disable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * No ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for Tx in the base code, and the base
+	 * code must not be modified in DPDK, so just call
+	 * the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+	ctrl = mac[4] | (mac[5] << 8);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+	pi = rte_cpu_to_be_16(pi);
+	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN and key */
+	pn = rte_cpu_to_be_32(pn);
+	if (idx == 0) {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+		}
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+		}
+	}
+
+	/* Set AN and select the SA */
+	ctrl = (an << idx * 2) | (idx << 4);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN */
+	pn = rte_cpu_to_be_32(pn);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+	/* Set the key */
+	for (i = 0; i < 4; i++) {
+		ctrl = (key[i * 4 + 0] <<  0) |
+		       (key[i * 4 + 1] <<  8) |
+		       (key[i * 4 + 2] << 16) |
+		       (key[i * 4 + 3] << 24);
+		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+	}
+
+	/* Set the AN and validate the SA */
+	ctrl = an | (1 << 2);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+	return 0;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 69b276f..14e1fd6 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -43,6 +43,7 @@
 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
 #define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
 #define IXGBE_FLAG_PHY_INTERRUPT    (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
 
 /*
  * Defines that were not part of ixgbe_type.h as they are not used by the
@@ -130,6 +131,10 @@
 #define IXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define IXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
 
+#define IXGBE_SECTX_MINSECIFG_MASK      0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH            0xFFFFFE00
+
 /*
  * Information about the fdir mode.
  */
@@ -265,11 +270,44 @@ struct ixgbe_filter_info {
 };
 
 /*
+ * Statistics counters collected by the MACsec engine
+ */
+struct ixgbe_macsec_stats {
+	/* TX port statistics */
+	uint64_t out_pkts_untagged;
+	uint64_t out_pkts_encrypted;
+	uint64_t out_pkts_protected;
+	uint64_t out_octets_encrypted;
+	uint64_t out_octets_protected;
+
+	/* RX port statistics */
+	uint64_t in_pkts_untagged;
+	uint64_t in_pkts_badtag;
+	uint64_t in_pkts_nosci;
+	uint64_t in_pkts_unknownsci;
+	uint64_t in_octets_decrypted;
+	uint64_t in_octets_validated;
+
+	/* RX SC statistics */
+	uint64_t in_pkts_unchecked;
+	uint64_t in_pkts_delayed;
+	uint64_t in_pkts_late;
+
+	/* RX SA statistics */
+	uint64_t in_pkts_ok;
+	uint64_t in_pkts_invalid;
+	uint64_t in_pkts_notvalid;
+	uint64_t in_pkts_unusedsa;
+	uint64_t in_pkts_notusingsa;
+};
+
+/*
  * Structure to store private data for each driver instance (for each port).
  */
 struct ixgbe_adapter {
 	struct ixgbe_hw             hw;
 	struct ixgbe_hw_stats       stats;
+	struct ixgbe_macsec_stats   macsec_stats;
 	struct ixgbe_hw_fdir_info   fdir;
 	struct ixgbe_interrupt      intr;
 	struct ixgbe_stat_mapping_registers stat_mappings;
@@ -300,6 +338,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->stats)
 
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
 #define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->intr)
 
@@ -448,4 +489,8 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 
 int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op, void *arg);
+
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
 #endif /* _IXGBE_ETHDEV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 0bbc583..00013c6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -86,6 +86,7 @@
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
 		PKT_TX_TCP_SEG |		 \
+		PKT_TX_MACSEC |			 \
 		PKT_TX_OUTER_IP_CKSUM)
 
 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -523,6 +524,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 		cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
 	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
 		cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+	if (ol_flags & PKT_TX_MACSEC)
+		cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
 	return cmdtype;
 }
 
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index c2fb826..22bc4c7 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -183,6 +183,106 @@ int
 rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
 
 /**
+ * Enable MACsec offload.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param en
+ *    1 - Enable encryption (encrypt and add integrity signature).
+ *    0 - Disable encryption (only add integrity signature).
+ * @param rp
+ *    1 - Enable replay protection.
+ *    0 - Disable replay protection.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offload.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+
+/**
+ * Configure Tx SC (Secure Connection).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+
+/**
+ * Configure Rx SC (Secure Connection).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the remote side.
+ * @param pi
+ *   The PI (port identifier) on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable Tx SA (Secure Association).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1).
+ * @param an
+ *   The association number on the local side.
+ * @param pn
+ *   The packet number on the local side.
+ * @param key
+ *   The key on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
+ * Enable Rx SA (Secure Association).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1)
+ * @param an
+ *   The association number on the remote side.
+ * @param pn
+ *   The packet number on the remote side.
+ * @param key
+ *   The key on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
  * Response sent back to ixgbe driver from user app after callback
  */
 enum rte_pmd_ixgbe_mb_event_rsp {
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 92434f3..6d68934 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -15,3 +15,14 @@ DPDK_16.11 {
 	rte_pmd_ixgbe_set_vf_vlan_insert;
 	rte_pmd_ixgbe_set_vf_vlan_stripq;
 } DPDK_2.0;
+
+DPDK_17.02 {
+	global:
+
+	rte_pmd_ixgbe_macsec_enable;
+	rte_pmd_ixgbe_macsec_disable;
+	rte_pmd_ixgbe_macsec_config_txsc;
+	rte_pmd_ixgbe_macsec_config_rxsc;
+	rte_pmd_ixgbe_macsec_select_txsa;
+	rte_pmd_ixgbe_macsec_select_rxsa;
+} DPDK_16.11;
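
For reference, below is a minimal sketch of how an application might drive
the new APIs end to end. This is an editorial illustration, not part of the
patch: the port number, MAC addresses, PI, SA index/AN/PN and key values are
placeholders, and error handling is omitted.

	#include <rte_pmd_ixgbe.h>

	static void
	macsec_setup_example(uint8_t port)
	{
		uint8_t local_mac[6]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint8_t remote_mac[6] = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb };
		uint8_t key[16] = { 0 };	/* 128-bit placeholder key */

		/* Enable the offload engine with encryption and replay protection */
		rte_pmd_ixgbe_macsec_enable(port, 1, 1);
		/* Configure the Tx and Rx secure connections (PI = 1 for Rx) */
		rte_pmd_ixgbe_macsec_config_txsc(port, local_mac);
		rte_pmd_ixgbe_macsec_config_rxsc(port, remote_mac, 1);
		/* Enable SA 0 with AN 0 and a starting PN of 1 on both sides */
		rte_pmd_ixgbe_macsec_select_txsa(port, 0, 0, 1, key);
		rte_pmd_ixgbe_macsec_select_rxsa(port, 0, 0, 1, key);
	}
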
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v6 5/6] app/testpmd: add MACsec offload commands
  2017-01-11  4:31         ` [PATCH v6 0/6] " Tiwei Bie
                             ` (3 preceding siblings ...)
  2017-01-11  4:31           ` [PATCH v6 4/6] net/ixgbe: add MACsec offload support Tiwei Bie
@ 2017-01-11  4:31           ` Tiwei Bie
  2017-01-13  9:07             ` Thomas Monjalon
  2017-01-11  4:31           ` [PATCH v6 6/6] doc: add ixgbe specific APIs Tiwei Bie
  2017-01-13 11:21           ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Tiwei Bie
  6 siblings, 1 reply; 79+ messages in thread
From: Tiwei Bie @ 2017-01-11  4:31 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

The following MACsec offload commands are added:

- set macsec offload <port_id> on encrypt on|off replay-protect on|off
- set macsec offload <port_id> off
- set macsec sc tx|rx <port_id> <mac> <pi>
- set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>
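
As an illustration, a typical session on port 0 might look like the
following (the MAC addresses, PI, PN and key values are placeholders,
not values taken from this patch):

   testpmd> set macsec offload 0 on encrypt on replay-protect on
   testpmd> set macsec sc tx 0 00:11:22:33:44:55 0
   testpmd> set macsec sc rx 0 66:77:88:99:aa:bb 1
   testpmd> set macsec sa tx 0 0 0 1 00112233445566778899aabbccddeeff
   testpmd> set macsec sa rx 0 0 0 1 00112233445566778899aabbccddeeff
   testpmd> set macsec offload 0 off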

Also update the testpmd user guide.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   2 +
 app/test-pmd/macswap.c                      |   2 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   2 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 +++
 6 files changed, 429 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index f768b6b..4a67894 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -275,6 +275,18 @@ static void cmd_help_long_parsed(void *parsed_result,
 
 			"set vf mac antispoof (port_id) (vf_id) (on|off).\n"
 			"    Set MAC antispoof for a VF from the PF.\n\n"
+
+			"set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)\n"
+			"    Enable MACsec offload.\n\n"
+
+			"set macsec offload (port_id) off\n"
+			"    Disable MACsec offload.\n\n"
+
+			"set macsec sc (tx|rx) (port_id) (mac) (pi)\n"
+			"    Configure MACsec secure connection (SC).\n\n"
+
+			"set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)\n"
+			"    Configure MACsec secure association (SA).\n\n"
 #endif
 
 			"vlan set strip (on|off) (port_id)\n"
@@ -11488,6 +11500,379 @@ cmdline_parse_inst_t cmd_set_vf_mac_addr = {
 		NULL,
 	},
 };
+
+/* MACsec configuration */
+
+/* Common result structure for MACsec offload enable */
+struct cmd_macsec_offload_on_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t on;
+	cmdline_fixed_string_t encrypt;
+	cmdline_fixed_string_t en_on_off;
+	cmdline_fixed_string_t replay_protect;
+	cmdline_fixed_string_t rp_on_off;
+};
+
+/* Common CLI fields for MACsec offload enable */
+cmdline_parse_token_string_t cmd_macsec_offload_on_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_on_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_on_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 offload, "offload");
+cmdline_parse_token_string_t cmd_macsec_offload_on_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_on_on =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 on, "on");
+cmdline_parse_token_string_t cmd_macsec_offload_on_encrypt =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 encrypt, "encrypt");
+cmdline_parse_token_string_t cmd_macsec_offload_on_en_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 en_on_off, "on#off");
+cmdline_parse_token_string_t cmd_macsec_offload_on_replay_protect =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 replay_protect, "replay-protect");
+cmdline_parse_token_string_t cmd_macsec_offload_on_rp_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 rp_on_off, "on#off");
+
+static void
+cmd_set_macsec_offload_on_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_on_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+	int en = (strcmp(res->en_on_off, "on") == 0) ? 1 : 0;
+	int rp = (strcmp(res->rp_on_off, "on") == 0) ? 1 : 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_on = {
+	.f = cmd_set_macsec_offload_on_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> on "
+		"encrypt on|off replay-protect on|off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_on_set,
+		(void *)&cmd_macsec_offload_on_macsec,
+		(void *)&cmd_macsec_offload_on_offload,
+		(void *)&cmd_macsec_offload_on_port_id,
+		(void *)&cmd_macsec_offload_on_on,
+		(void *)&cmd_macsec_offload_on_encrypt,
+		(void *)&cmd_macsec_offload_on_en_on_off,
+		(void *)&cmd_macsec_offload_on_replay_protect,
+		(void *)&cmd_macsec_offload_on_rp_on_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec offload disable */
+struct cmd_macsec_offload_off_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t off;
+};
+
+/* Common CLI fields for MACsec offload disable */
+cmdline_parse_token_string_t cmd_macsec_offload_off_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_off_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_off_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 offload, "offload");
+cmdline_parse_token_string_t cmd_macsec_offload_off_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_off_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 off, "off");
+
+static void
+cmd_set_macsec_offload_off_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_off_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_disable(port_id);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_off = {
+	.f = cmd_set_macsec_offload_off_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_off_set,
+		(void *)&cmd_macsec_offload_off_macsec,
+		(void *)&cmd_macsec_offload_off_offload,
+		(void *)&cmd_macsec_offload_off_port_id,
+		(void *)&cmd_macsec_offload_off_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure connection configure */
+struct cmd_macsec_sc_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sc;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	struct ether_addr mac;
+	uint16_t pi;
+};
+
+/* Common CLI fields for MACsec secure connection configure */
+cmdline_parse_token_string_t cmd_macsec_sc_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sc_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sc_sc =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 sc, "sc");
+cmdline_parse_token_string_t cmd_macsec_sc_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sc_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 port_id, UINT8);
+cmdline_parse_token_etheraddr_t cmd_macsec_sc_mac =
+	TOKEN_ETHERADDR_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 mac);
+cmdline_parse_token_num_t cmd_macsec_sc_pi =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 pi, UINT16);
+
+static void
+cmd_set_macsec_sc_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sc_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_config_txsc(res->port_id,
+				res->mac.addr_bytes) :
+		rte_pmd_ixgbe_macsec_config_rxsc(res->port_id,
+				res->mac.addr_bytes, res->pi);
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sc = {
+	.f = cmd_set_macsec_sc_parsed,
+	.data = NULL,
+	.help_str = "set macsec sc tx|rx <port_id> <mac> <pi>",
+	.tokens = {
+		(void *)&cmd_macsec_sc_set,
+		(void *)&cmd_macsec_sc_macsec,
+		(void *)&cmd_macsec_sc_sc,
+		(void *)&cmd_macsec_sc_tx_rx,
+		(void *)&cmd_macsec_sc_port_id,
+		(void *)&cmd_macsec_sc_mac,
+		(void *)&cmd_macsec_sc_pi,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure association configure */
+struct cmd_macsec_sa_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sa;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	uint8_t idx;
+	uint8_t an;
+	uint32_t pn;
+	cmdline_fixed_string_t key;
+};
+
+/* Common CLI fields for MACsec secure association configure */
+cmdline_parse_token_string_t cmd_macsec_sa_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sa_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sa_sa =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 sa, "sa");
+cmdline_parse_token_string_t cmd_macsec_sa_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sa_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 port_id, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_idx =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 idx, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_an =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 an, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_pn =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 pn, UINT32);
+cmdline_parse_token_string_t cmd_macsec_sa_key =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 key, NULL);
+
+static void
+cmd_set_macsec_sa_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sa_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+	uint8_t key[16] = { 0 };
+	uint8_t xdgt0;
+	uint8_t xdgt1;
+	int key_len;
+	int i;
+
+	key_len = strlen(res->key) / 2;
+	if (key_len > 16)
+		key_len = 16;
+
+	for (i = 0; i < key_len; i++) {
+		xdgt0 = parse_and_check_key_hexa_digit(res->key, (i * 2));
+		if (xdgt0 == 0xFF)
+			return;
+		xdgt1 = parse_and_check_key_hexa_digit(res->key, (i * 2) + 1);
+		if (xdgt1 == 0xFF)
+			return;
+		key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1);
+	}
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_select_txsa(res->port_id,
+			res->idx, res->an, res->pn, key) :
+		rte_pmd_ixgbe_macsec_select_rxsa(res->port_id,
+			res->idx, res->an, res->pn, key);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		printf("invalid idx %d or an %d\n", res->idx, res->an);
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sa = {
+	.f = cmd_set_macsec_sa_parsed,
+	.data = NULL,
+	.help_str = "set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>",
+	.tokens = {
+		(void *)&cmd_macsec_sa_set,
+		(void *)&cmd_macsec_sa_macsec,
+		(void *)&cmd_macsec_sa_sa,
+		(void *)&cmd_macsec_sa_tx_rx,
+		(void *)&cmd_macsec_sa_port_id,
+		(void *)&cmd_macsec_sa_idx,
+		(void *)&cmd_macsec_sa_an,
+		(void *)&cmd_macsec_sa_pn,
+		(void *)&cmd_macsec_sa_key,
+		NULL,
+	},
+};
 #endif
 
 /* ******************************************************************************** */
@@ -11656,6 +12041,10 @@ cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_split_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_on,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sc,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sa,
 #endif
 	NULL,
 };
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index d361db1..cf7eab1 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -113,6 +113,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index f996039..3a09351 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -113,6 +113,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 22ce2d6..0a9a1af 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -143,6 +143,8 @@ struct fwd_stream {
 #define TESTPMD_TX_OFFLOAD_INSERT_VLAN       0x0040
 /** Insert double VLAN header in forward engine */
 #define TESTPMD_TX_OFFLOAD_INSERT_QINQ       0x0080
+/** Offload MACsec in forward engine */
+#define TESTPMD_TX_OFFLOAD_MACSEC            0x0100
 
 /** Descriptor for a single flow. */
 struct port_flow {
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index e996f35..8b1a2af 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -215,6 +215,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_mbuf_raw_alloc(mbp);
 		if (pkt == NULL) {
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index c611dc5..e3222c0 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -507,6 +507,38 @@ Set mac antispoof for a VF from the PF::
 
    testpmd> set vf mac antispoof  (port_id) (vf_id) (on|off)
 
+set macsec offload
+~~~~~~~~~~~~~~~~~~
+
+Enable/disable MACsec offload::
+
+   testpmd> set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)
+   testpmd> set macsec offload (port_id) off
+
+set macsec sc
+~~~~~~~~~~~~~
+
+Configure MACsec secure connection (SC)::
+
+   testpmd> set macsec sc (tx|rx) (port_id) (mac) (pi)
+
+.. note::
+
+   The pi argument is ignored for tx.
+   Check the NIC Datasheet for hardware limits.
+
+set macsec sa
+~~~~~~~~~~~~~
+
+Configure MACsec secure association (SA)::
+
+   testpmd> set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)
+
+.. note::
+
+   The IDX value must be 0 or 1.
+   Check the NIC Datasheet for hardware limits.
+
 vlan set strip
 ~~~~~~~~~~~~~~
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v6 6/6] doc: add ixgbe specific APIs
  2017-01-11  4:31         ` [PATCH v6 0/6] " Tiwei Bie
                             ` (4 preceding siblings ...)
  2017-01-11  4:31           ` [PATCH v6 5/6] app/testpmd: add MACsec offload commands Tiwei Bie
@ 2017-01-11  4:31           ` Tiwei Bie
  2017-01-13 11:21           ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Tiwei Bie
  6 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-11  4:31 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Add information about the new ixgbe PMD APIs in the release notes.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: John McNamara <john.mcnamara@intel.com>
---
 doc/guides/rel_notes/release_17_02.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 3958714..a3490a5 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -52,6 +52,11 @@ New Features
   See the :ref:`Generic flow API <Generic_flow_API>` documentation for more
   information.
 
+* **Added APIs for MACsec offload support to the ixgbe PMD.**
+
+  Six new APIs have been added to the ixgbe PMD for MACsec offload support.
+  The declarations for the APIs can be found in ``rte_pmd_ixgbe.h``.
+
 * **Improved offload support in mbuf.**
 
   Added a new Tx mbuf flag for MACsec, which can be set by applications
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-09 11:26                                 ` Thomas Monjalon
  2017-01-10  2:08                                   ` Tiwei Bie
@ 2017-01-11 17:32                                   ` Olivier MATZ
  2017-01-12  2:08                                     ` Tiwei Bie
  2017-01-12  2:42                                     ` Yuanhan Liu
  1 sibling, 2 replies; 79+ messages in thread
From: Olivier MATZ @ 2017-01-11 17:32 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: Tiwei Bie, Ananyev, Konstantin, Adrien Mazarguil, dev, Lu,
	Wenzhuo, Mcnamara, John, olivier.matz, Zhang, Helin, Dai, Wei,
	Wang, Xiao W

Hi Tiwei, Hi Thomas,

On Mon, 09 Jan 2017 12:26:53 +0100, Thomas Monjalon
<thomas.monjalon@6wind.com> wrote:
> 2017-01-09 11:57, Tiwei Bie:
> > On Sun, Jan 08, 2017 at 08:39:55PM +0800, Ananyev, Konstantin
> > wrote:  
> > > > Well my first reply to this thread was asking why isn't the
> > > > whole API global from the start then?  
> > > 
> > > That's good question, and my preference would always be to have
> > > the API to configure this feature as generic one.
> > > I guess the main reason why it is not right now we don't reach an
> > > agreement how this API should look like: 
> > > http://dpdk.org/ml/archives/dev/2016-September/047810.html
> > > But I'll leave it to the author to provide the real reason
> > > here.   
> > 
> > Yes, currently this work just provided a thin layer over 82599's
> > hardware MACsec offload support to allow users configure 82599's
> > MACsec offload engine. The current API may be too specific and may
> > need a rework to be used with other NICs.  
> 
> I think it is a really good approach to start such API privately in a
> driver. It will give us more time and experience to design a proper
> generic API.
> 
> Regarding the mbuf flag, it looks straight-forward, and as it is IEEE
> standardized, I do not see any objection to add it now.
> However, I will wait for the approval of Olivier - as maintainer of
> mbuf.
> 

Generally speaking, we have to be careful when introducing new mbuf
flags, since we don't have so many of them left (~25 remaining out of
64, which means we may run out of them in 3-4 years).

In this particular case, although the flag is added for an ixgbe-specific
API, the exact same flag would also be needed when MACsec is implemented
in another PMD. That's the same for the ethdev capability flag.
Moreover, as Thomas stated, it's a standardized protocol, so it's
legitimate to have it included in rte_mbuf.h.

For me, having PMD-specific APIs for a new feature is not a problem,
but I think we should try to have a generic API as soon as the feature
is supported by several PMDs.

Regards,
Olivier

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v6 1/6] mbuf: add flag for MACsec
  2017-01-11  4:31           ` [PATCH v6 1/6] mbuf: add flag for MACsec Tiwei Bie
@ 2017-01-11 17:41             ` Olivier MATZ
  0 siblings, 0 replies; 79+ messages in thread
From: Olivier MATZ @ 2017-01-11 17:41 UTC (permalink / raw)
  To: Tiwei Bie
  Cc: dev, adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

On Wed, 11 Jan 2017 12:31:33 +0800, Tiwei Bie <tiwei.bie@intel.com>
wrote:
> Add a new Tx flag in mbuf, that can be set by applications to
> enable the MACsec offload for a packet to be transmitted.
> 
> Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>

Acked-by: Olivier Matz <olivier.matz@6wind.com>

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-11 17:32                                   ` Olivier MATZ
@ 2017-01-12  2:08                                     ` Tiwei Bie
  2017-01-12  2:42                                     ` Yuanhan Liu
  1 sibling, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-12  2:08 UTC (permalink / raw)
  To: Olivier MATZ
  Cc: Thomas Monjalon, Ananyev, Konstantin, Adrien Mazarguil, dev, Lu,
	Wenzhuo, Mcnamara, John, Zhang, Helin, Dai, Wei, Wang, Xiao W

On Wed, Jan 11, 2017 at 06:32:48PM +0100, Olivier MATZ wrote:
> Hi Tiwei, Hi Thomas,
> 
> On Mon, 09 Jan 2017 12:26:53 +0100, Thomas Monjalon
> <thomas.monjalon@6wind.com> wrote:
> > 2017-01-09 11:57, Tiwei Bie:
> > > On Sun, Jan 08, 2017 at 08:39:55PM +0800, Ananyev, Konstantin
> > > wrote:  
> > > > > Well my first reply to this thread was asking why isn't the
> > > > > whole API global from the start then?  
> > > > 
> > > > That's good question, and my preference would always be to have
> > > > the API to configure this feature as generic one.
> > > > I guess the main reason why it is not right now we don't reach an
> > > > agreement how this API should look like: 
> > > > http://dpdk.org/ml/archives/dev/2016-September/047810.html
> > > > But I'll leave it to the author to provide the real reason
> > > > here.   
> > > 
> > > Yes, currently this work just provided a thin layer over 82599's
> > > hardware MACsec offload support to allow users configure 82599's
> > > MACsec offload engine. The current API may be too specific and may
> > > need a rework to be used with other NICs.  
> > 
> > I think it is a really good approach to start such API privately in a
> > driver. It will give us more time and experience to design a proper
> > generic API.
> > 
> > Regarding the mbuf flag, it looks straight-forward, and as it is IEEE
> > standardized, I do not see any objection to add it now.
> > However, I will wait for the approval of Olivier - as maintainer of
> > mbuf.
> > 
> 
> Generally speaking, we have to be careful when introducing new mbuf
> flags, since we don't have so many of them left (~25 remaining out of
> 64, which means we may run out of them in 3-4 years).
> 
> In this particular case, although the flag is added for an ixgbe-specific
> API, the exact same flag would also be needed when MACsec is implemented
> in another PMD. That's the same for the ethdev capability flag.
> Moreover, as Thomas stated, it's a standardized protocol, so it's
> legitimate to have it included in rte_mbuf.h.
> 
> For me, having PMD-specific APIs for a new feature is not a problem,
> but I think we should try to have a generic API as soon as the feature
> is supported by several PMDs.
> 

Sure! Thank you very much!

Best regards,
Tiwei Bie

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v5 3/8] ethdev: reserve capability flags for PMD-specific API
  2017-01-11 17:32                                   ` Olivier MATZ
  2017-01-12  2:08                                     ` Tiwei Bie
@ 2017-01-12  2:42                                     ` Yuanhan Liu
  1 sibling, 0 replies; 79+ messages in thread
From: Yuanhan Liu @ 2017-01-12  2:42 UTC (permalink / raw)
  To: Olivier MATZ
  Cc: Thomas Monjalon, Tiwei Bie, Ananyev, Konstantin,
	Adrien Mazarguil, dev, Lu, Wenzhuo, Mcnamara, John, Zhang, Helin,
	Dai, Wei, Wang, Xiao W

On Wed, Jan 11, 2017 at 06:32:48PM +0100, Olivier MATZ wrote:
> Generally speaking, we have to be careful when introducing new mbuf
> flags, since we don't have so many of them left (~25 remaining out of
> 64, which means we may run out of them in 3-4 years).
> 
> In this particular case, although the flag is added for an ixgbe-specific
> API, the exact same flag would also be needed when MACsec is implemented
> in another PMD. That's the same for the ethdev capability flag.
> Moreover, as Thomas stated, it's a standardized protocol, so it's
> legitimate to have it included in rte_mbuf.h.
> 
> For me, having PMD-specific APIs for a new feature is not a problem,
> but I think we should try to have a generic API as soon as the feature
> is supported by several PMDs.

JFYI, here is how the virtio handle the feature bits. It basically
just divides it to two parts.

From virtio 1.0 spec (2.2 Feature Bits):

    Feature bits are allocated as follows:

    - 0 to 23 Feature bits for the specific device type

    - 24 to 32 Feature bits reserved for extensions to the queue and feature
      negotiation mechanisms

    - 33 and above Feature bits reserved for future extensions.

    Note: For example, feature bit 0 for a network device (i.e. Device ID 1)
    indicates that the device supports checksumming of packets.


	--yliu

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v6 5/6] app/testpmd: add MACsec offload commands
  2017-01-11  4:31           ` [PATCH v6 5/6] app/testpmd: add MACsec offload commands Tiwei Bie
@ 2017-01-13  9:07             ` Thomas Monjalon
  2017-01-13  9:16               ` Tiwei Bie
  0 siblings, 1 reply; 79+ messages in thread
From: Thomas Monjalon @ 2017-01-13  9:07 UTC (permalink / raw)
  To: Tiwei Bie
  Cc: dev, adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	konstantin.ananyev, helin.zhang, wei.dai, xiao.w.wang

There is an error with clang:

app/test-pmd/cmdline.c:11535:13: fatal error:
expression which evaluates to zero treated as a null pointer
constant of type 'const char *' [-Wnon-literal-null-conversion]
                 port_id, UINT8);
                          ^~~~~
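
The diagnostic points at the port_id token of the new MACsec commands: it
is declared as a cmdline_parse_token_string_t but initialized with
TOKEN_NUM_INITIALIZER. A sketch of the likely fix (the actual change is
expected in v7) is to declare it as a numeric token instead:

	cmdline_parse_token_num_t cmd_macsec_offload_on_port_id =
		TOKEN_NUM_INITIALIZER
			(struct cmd_macsec_offload_on_result,
			 port_id, UINT8);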

Please, send a v7

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v6 5/6] app/testpmd: add MACsec offload commands
  2017-01-13  9:07             ` Thomas Monjalon
@ 2017-01-13  9:16               ` Tiwei Bie
  0 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-13  9:16 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: dev, adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	konstantin.ananyev, helin.zhang, wei.dai, xiao.w.wang

On Fri, Jan 13, 2017 at 10:07:34AM +0100, Thomas Monjalon wrote:
> There is an error with clang:
> 
> app/test-pmd/cmdline.c:11535:13: fatal error:
> expression which evaluates to zero treated as a null pointer
> constant of type 'const char *' [-Wnon-literal-null-conversion]
>                  port_id, UINT8);
>                           ^~~~~
> 
> Please, send a v7

Oops.. Really sorry about it.. Will send v7!

Best regards,
Tiwei Bie

^ permalink raw reply	[flat|nested] 79+ messages in thread

* [PATCH v7 0/6] Add MACsec offload support for ixgbe
  2017-01-11  4:31         ` [PATCH v6 0/6] " Tiwei Bie
                             ` (5 preceding siblings ...)
  2017-01-11  4:31           ` [PATCH v6 6/6] doc: add ixgbe specific APIs Tiwei Bie
@ 2017-01-13 11:21           ` Tiwei Bie
  2017-01-13 11:21             ` [PATCH v7 1/6] mbuf: add flag for MACsec Tiwei Bie
                               ` (7 more replies)
  6 siblings, 8 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-13 11:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

This patch set adds the MACsec offload support for ixgbe.
The testpmd is also updated to support MACsec cmds.

v2:
- Update the documents for testpmd;
- Update the release notes;
- Reuse the functions provided by base code;

v3:
- Add the missing parts of MACsec mbuf flag and reorganize the patch set;
- Add an ethdev event type for MACsec;
- Advertise the MACsec offload capabilities based on the mac type;
- Minor fixes and improvements;

v4:
- Reserve bits in mbuf and ethdev for PMD specific API;
- Use the reserved bits in PMD specific API;

v5:
- Add MACsec offload in the NIC feature list;
- Minor improvements on comments;

v6:
- Revert the changes related to the reserved flags;
- Rebase the patch set on the latest branch, as the xstats code changed recently;
- Update the feature list when adding the MACsec support for ixgbe;

v7:
- Fix clang build;

Tiwei Bie (6):
  mbuf: add flag for MACsec
  ethdev: add event type for MACsec
  ethdev: add MACsec offload capability flags
  net/ixgbe: add MACsec offload support
  app/testpmd: add MACsec offload commands
  doc: add ixgbe specific APIs

 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   2 +
 app/test-pmd/macswap.c                      |   2 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   2 +
 doc/guides/nics/features/default.ini        |   1 +
 doc/guides/nics/features/ixgbe.ini          |   1 +
 doc/guides/rel_notes/release_17_02.rst      |  10 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 ++
 drivers/net/ixgbe/ixgbe_ethdev.c            | 482 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 100 ++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 lib/librte_ether/rte_ethdev.h               |   3 +
 lib/librte_mbuf/rte_mbuf.c                  |   2 +
 lib/librte_mbuf/rte_mbuf.h                  |   6 +
 17 files changed, 1088 insertions(+), 5 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 79+ messages in thread

* [PATCH v7 1/6] mbuf: add flag for MACsec
  2017-01-13 11:21           ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Tiwei Bie
@ 2017-01-13 11:21             ` Tiwei Bie
  2017-01-13 11:21             ` [PATCH v7 2/6] ethdev: add event type " Tiwei Bie
                               ` (6 subsequent siblings)
  7 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-13 11:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Add a new Tx flag in mbuf that can be set by applications to
enable the MACsec offload for a packet to be transmitted.
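
As a minimal sketch (not part of this patch), an application would request
the offload per packet roughly like this, assuming mp is an already
initialized mbuf pool:

	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);

	if (m != NULL) {
		/* Request MACsec insertion for this packet on Tx */
		m->ol_flags |= PKT_TX_MACSEC;
		/* ... fill in the packet data and send it ... */
	}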

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
---
 doc/guides/rel_notes/release_17_02.rst | 5 +++++
 lib/librte_mbuf/rte_mbuf.c             | 2 ++
 lib/librte_mbuf/rte_mbuf.h             | 6 ++++++
 3 files changed, 13 insertions(+)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 180af82..3958714 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -52,6 +52,11 @@ New Features
   See the :ref:`Generic flow API <Generic_flow_API>` documentation for more
   information.
 
+* **Improved offload support in mbuf.**
+
+  Added a new Tx mbuf flag for MACsec, which can be set by applications
+  to enable the MACsec offload for the packets to be transmitted.
+
 
 Resolved Issues
 ---------------
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index 63f43c8..72ad91e 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -404,6 +404,7 @@ const char *rte_get_tx_ol_flag_name(uint64_t mask)
 	case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
 	case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
 	case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
+	case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
 	default: return NULL;
 	}
 }
@@ -434,6 +435,7 @@ rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
 		  "PKT_TX_TUNNEL_NONE" },
 		{ PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK,
 		  "PKT_TX_TUNNEL_NONE" },
+		{ PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
 	};
 	const char *name;
 	unsigned int i;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 4476d75..ffbb4b3 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -182,6 +182,12 @@ extern "C" {
 /* add new TX flags here */
 
 /**
+ * Offload the MACsec. This flag must be set by the application to enable
+ * this offload feature for a packet to be transmitted.
+ */
+#define PKT_TX_MACSEC        (1ULL << 44)
+
+/**
  * Bits 45:48 used for the tunnel type.
  * When doing Tx offload like TSO or checksum, the HW needs to configure the
  * tunnel type into the HW descriptors.
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v7 2/6] ethdev: add event type for MACsec
  2017-01-13 11:21           ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Tiwei Bie
  2017-01-13 11:21             ` [PATCH v7 1/6] mbuf: add flag for MACsec Tiwei Bie
@ 2017-01-13 11:21             ` Tiwei Bie
  2017-01-16  2:31               ` Ananyev, Konstantin
  2017-01-13 11:21             ` [PATCH v7 3/6] ethdev: add MACsec offload capability flags Tiwei Bie
                               ` (5 subsequent siblings)
  7 siblings, 1 reply; 79+ messages in thread
From: Tiwei Bie @ 2017-01-13 11:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

This commit adds the following event type:

- RTE_ETH_EVENT_MACSEC

This event will occur when the PN counter in a MACsec connection
reaches the exhaustion threshold.
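
For illustration only (not part of this patch), an application could
subscribe to this event roughly as below, assuming the three-argument
rte_eth_dev_cb_fn callback signature used at the time of this series:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static void
macsec_event_cb(uint8_t port_id, enum rte_eth_event_type event, void *arg)
{
        RTE_SET_USED(arg);
        if (event == RTE_ETH_EVENT_MACSEC)
                printf("port %u: MACsec PN exhaustion threshold reached\n",
                       (unsigned int)port_id);
}

static int
register_macsec_event_cb(uint8_t port_id)
{
        return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_MACSEC,
                                             macsec_event_cb, NULL);
}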

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 1c356c1..d5d2956 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -3188,6 +3188,7 @@ enum rte_eth_event_type {
 	RTE_ETH_EVENT_INTR_RESET,
 			/**< reset interrupt event, sent to VF on PF reset */
 	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
+	RTE_ETH_EVENT_MACSEC,   /**< MACsec offload related event */
 	RTE_ETH_EVENT_MAX       /**< max value of this enum */
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v7 3/6] ethdev: add MACsec offload capability flags
  2017-01-13 11:21           ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Tiwei Bie
  2017-01-13 11:21             ` [PATCH v7 1/6] mbuf: add flag for MACsec Tiwei Bie
  2017-01-13 11:21             ` [PATCH v7 2/6] ethdev: add event type " Tiwei Bie
@ 2017-01-13 11:21             ` Tiwei Bie
  2017-01-16  2:33               ` Ananyev, Konstantin
  2017-01-13 11:21             ` [PATCH v7 4/6] net/ixgbe: add MACsec offload support Tiwei Bie
                               ` (4 subsequent siblings)
  7 siblings, 1 reply; 79+ messages in thread
From: Tiwei Bie @ 2017-01-13 11:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

If these flags are advertised by a PMD, the NIC supports the MACsec
offload. Incoming MACsec traffic can be offloaded transparently after
the MACsec offload is configured correctly by the application. The
application can set the PKT_TX_MACSEC flag in mbufs to enable the
MACsec offload for the packets to be transmitted.
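
For illustration only (not part of this patch), an application could
probe for the capability roughly as follows before configuring MACsec:

#include <rte_ethdev.h>

/* Return non-zero when the port advertises both MACsec offload flags. */
static int
port_supports_macsec(uint8_t port_id)
{
        struct rte_eth_dev_info dev_info;

        rte_eth_dev_info_get(port_id, &dev_info);
        return (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_MACSEC_STRIP) &&
               (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT);
}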

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
---
 lib/librte_ether/rte_ethdev.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index d5d2956..49c073b 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -881,6 +881,7 @@ struct rte_eth_conf {
 #define DEV_RX_OFFLOAD_TCP_LRO     0x00000010
 #define DEV_RX_OFFLOAD_QINQ_STRIP  0x00000020
 #define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
 
 /**
  * TX offload capabilities of a device.
@@ -898,6 +899,7 @@ struct rte_eth_conf {
 #define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800    /**< Used for tunneling packet. */
 #define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000    /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
 
 /**
  * Ethernet device information
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v7 4/6] net/ixgbe: add MACsec offload support
  2017-01-13 11:21           ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Tiwei Bie
                               ` (2 preceding siblings ...)
  2017-01-13 11:21             ` [PATCH v7 3/6] ethdev: add MACsec offload capability flags Tiwei Bie
@ 2017-01-13 11:21             ` Tiwei Bie
  2017-01-13 11:21             ` [PATCH v7 5/6] app/testpmd: add MACsec offload commands Tiwei Bie
                               ` (3 subsequent siblings)
  7 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-13 11:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

MACsec (or LinkSec) is a MAC-level encryption/authentication scheme
defined in IEEE 802.1AE that uses symmetric cryptography.
This commit adds MACsec offload support for ixgbe.
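
For illustration only (not part of this patch), a minimal sketch of how
an application could drive the ixgbe-specific API added below; the MAC
addresses, PI, PN and key are placeholder values:

#include <rte_ethdev.h>
#include <rte_pmd_ixgbe.h>

static int
setup_macsec(uint8_t port_id, uint8_t *local_mac, uint8_t *peer_mac,
             uint16_t peer_pi, uint8_t *key)
{
        int ret;

        /* Enable the offload with encryption and replay protection. */
        ret = rte_pmd_ixgbe_macsec_enable(port_id, 1, 1);
        if (ret != 0)
                return ret;

        /* Configure the Tx/Rx secure connections ... */
        rte_pmd_ixgbe_macsec_config_txsc(port_id, local_mac);
        rte_pmd_ixgbe_macsec_config_rxsc(port_id, peer_mac, peer_pi);

        /* ... and one secure association each (idx 0, AN 0, PN 0). */
        rte_pmd_ixgbe_macsec_select_txsa(port_id, 0, 0, 0, key);
        rte_pmd_ixgbe_macsec_select_rxsa(port_id, 0, 0, 0, key);

        /* Tx packets must also carry PKT_TX_MACSEC in their ol_flags. */
        return 0;
}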

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 doc/guides/nics/features/default.ini        |   1 +
 doc/guides/nics/features/ixgbe.ini          |   1 +
 drivers/net/ixgbe/ixgbe_ethdev.c            | 482 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 100 ++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 7 files changed, 638 insertions(+), 5 deletions(-)

diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini
index f1bf9bf..c412d6f 100644
--- a/doc/guides/nics/features/default.ini
+++ b/doc/guides/nics/features/default.ini
@@ -43,6 +43,7 @@ VLAN offload         =
 QinQ offload         =
 L3 checksum offload  =
 L4 checksum offload  =
+MACsec offload       =
 Inner L3 checksum    =
 Inner L4 checksum    =
 Packet type parsing  =
diff --git a/doc/guides/nics/features/ixgbe.ini b/doc/guides/nics/features/ixgbe.ini
index 4a5667f..24c02fb 100644
--- a/doc/guides/nics/features/ixgbe.ini
+++ b/doc/guides/nics/features/ixgbe.ini
@@ -36,6 +36,7 @@ VLAN offload         = Y
 QinQ offload         = Y
 L3 checksum offload  = Y
 L4 checksum offload  = Y
+MACsec offload       = Y
 Inner L3 checksum    = Y
 Inner L4 checksum    = Y
 Packet type parsing  = Y
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index b7415e8..08f4449 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -231,6 +231,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 			uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
@@ -747,6 +748,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
 			   sizeof(rte_ixgbe_stats_strings[0]))
 
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_untagged)},
+	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_encrypted)},
+	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_protected)},
+	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_octets_encrypted)},
+	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+		out_octets_protected)},
+	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_untagged)},
+	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_badtag)},
+	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_nosci)},
+	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unknownsci)},
+	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+		in_octets_decrypted)},
+	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+		in_octets_validated)},
+	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unchecked)},
+	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_delayed)},
+	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_late)},
+	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_ok)},
+	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_invalid)},
+	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notvalid)},
+	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unusedsa)},
+	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+			   sizeof(rte_ixgbe_macsec_strings[0]))
+
 /* Per-queue statistics */
 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
 	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
@@ -2371,6 +2417,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		/* check if lsc interrupt is enabled */
 		if (dev->data->dev_conf.intr_conf.lsc != 0)
 			ixgbe_dev_lsc_interrupt_setup(dev);
+		ixgbe_dev_macsec_interrupt_setup(dev);
 	} else {
 		rte_intr_callback_unregister(intr_handle,
 					     ixgbe_dev_interrupt_handler, dev);
@@ -2561,6 +2608,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 static void
 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 			   struct ixgbe_hw_stats *hw_stats,
+			   struct ixgbe_macsec_stats *macsec_stats,
 			   uint64_t *total_missed_rx, uint64_t *total_qbrc,
 			   uint64_t *total_qprc, uint64_t *total_qprdc)
 {
@@ -2730,6 +2778,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 	/* Flow Director Stats registers */
 	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
 	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+
+	/* MACsec Stats registers */
+	macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+	macsec_stats->out_pkts_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+	macsec_stats->out_pkts_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+	macsec_stats->out_octets_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+	macsec_stats->out_octets_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+	macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+	macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+	macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+	macsec_stats->in_pkts_unknownsci +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+	macsec_stats->in_octets_decrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+	macsec_stats->in_octets_validated +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+	macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+	macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+	macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+	for (i = 0; i < 2; i++) {
+		macsec_stats->in_pkts_ok +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+		macsec_stats->in_pkts_invalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+		macsec_stats->in_pkts_notvalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+	}
+	macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+	macsec_stats->in_pkts_notusingsa +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
 }
 
 /*
@@ -2742,6 +2824,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i;
 
@@ -2750,8 +2835,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-			&total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	if (stats == NULL)
 		return;
@@ -2803,7 +2888,7 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 /* This function calculates the number of xstats based on the current config */
 static unsigned
 ixgbe_xstats_calc_num(void) {
-	return IXGBE_NB_HW_STATS +
+	return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
 		(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
 		(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
 }
@@ -2830,6 +2915,15 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 			count++;
 		}
 
+		/* MACsec Stats */
+		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+			snprintf(xstats_names[count].name,
+				sizeof(xstats_names[count].name),
+				"%s",
+				rte_ixgbe_macsec_strings[i].name);
+			count++;
+		}
+
 		/* RX Priority Stats */
 		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2879,6 +2973,9 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_hw_stats *hw_stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
 	unsigned i, stat, count = 0;
 
@@ -2892,8 +2989,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	total_qprc = 0;
 	total_qprdc = 0;
 
-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-				   &total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);
 
 	/* If this is a reset xstats is NULL, and we have cleared the
 	 * registers by reading them.
@@ -2910,6 +3007,14 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		count++;
 	}
 
+	/* MACsec Stats */
+	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+		xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
+				rte_ixgbe_macsec_strings[i].offset);
+		xstats[count].id = count;
+		count++;
+	}
+
 	/* RX Priority Stats */
 	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
 		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2939,6 +3044,9 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw_stats *stats =
 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
 
 	unsigned count = ixgbe_xstats_calc_num();
 
@@ -2947,6 +3055,7 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
 
 	/* Reset software totals */
 	memset(stats, 0, sizeof(*stats));
+	memset(macsec_stats, 0, sizeof(*macsec_stats));
 }
 
 static void
@@ -3079,6 +3188,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	    !RTE_ETH_DEV_SRIOV(dev).active)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
 
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3092,6 +3205,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_TX_OFFLOAD_SCTP_CKSUM  |
 		DEV_TX_OFFLOAD_TCP_TSO;
 
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
 	if (hw->mac.type == ixgbe_mac_X550 ||
 	    hw->mac.type == ixgbe_mac_X550EM_x ||
 	    hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3389,6 +3506,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 	return 0;
 }
 
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	intr->mask |= IXGBE_EICR_LINKSEC;
+
+	return 0;
+}
+
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
  *
@@ -3423,6 +3562,9 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 	if (eicr & IXGBE_EICR_MAILBOX)
 		intr->flags |= IXGBE_FLAG_MAILBOX;
 
+	if (eicr & IXGBE_EICR_LINKSEC)
+		intr->flags |= IXGBE_FLAG_MACSEC;
+
 	if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
 	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
 	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
@@ -3577,6 +3719,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	}
 
+	if (intr->flags & IXGBE_FLAG_MACSEC) {
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
+					      NULL);
+		intr->flags &= ~IXGBE_FLAG_MACSEC;
+	}
+
 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
 	ixgbe_enable_intr(dev);
 	rte_intr_enable(intr_handle);
@@ -7617,6 +7765,330 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 	ixgbevf_dev_interrupt_action(dev);
 }
 
+/**
+ *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the transmit data path and waits for the HW to internally empty
+ *  the Tx security block
+ **/
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECTX_POLL 40
+
+	int i;
+	int sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
+		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
+			break;
+		/* Use interrupt-safe sleep just in case */
+		usec_delay(1000);
+	}
+
+	/* For informational purposes only */
+	if (i >= IXGBE_MAX_SECTX_POLL)
+		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+			 "path fully disabled.  Continuing with init.\n");
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the transmit data path.
+ **/
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+	uint32_t sectxreg;
+
+	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return IXGBE_SUCCESS;
+}
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * As no ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Enable Ethernet CRC (required by MACsec offload) */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+	/* Enable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+	ctrl |= 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+	/* Enable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+		     IXGBE_LSECTXCTRL_AUTH;
+	ctrl |= IXGBE_LSECTXCTRL_AISCI;
+	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+	if (rp)
+		ctrl |= IXGBE_LSECRXCTRL_RP;
+	else
+		ctrl &= ~IXGBE_LSECRXCTRL_RP;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * As no ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * As no ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Disable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Disable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * As no ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+	ctrl = mac[4] | (mac[5] << 8);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+	pi = rte_cpu_to_be_16(pi);
+	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Set the PN and key */
+	pn = rte_cpu_to_be_32(pn);
+	if (idx == 0) {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+		}
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+		}
+	}
+
+	/* Set AN and select the SA */
+	ctrl = (an << idx * 2) | (idx << 4);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN */
+	pn = rte_cpu_to_be_32(pn);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+	/* Set the key */
+	for (i = 0; i < 4; i++) {
+		ctrl = (key[i * 4 + 0] <<  0) |
+		       (key[i * 4 + 1] <<  8) |
+		       (key[i * 4 + 2] << 16) |
+		       (key[i * 4 + 3] << 24);
+		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+	}
+
+	/* Set the AN and validate the SA */
+	ctrl = an | (1 << 2);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+	return 0;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 69b276f..14e1fd6 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -43,6 +43,7 @@
 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
 #define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
 #define IXGBE_FLAG_PHY_INTERRUPT    (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
 
 /*
  * Defines that were not part of ixgbe_type.h as they are not used by the
@@ -130,6 +131,10 @@
 #define IXGBE_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
 #define IXGBE_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET
 
+#define IXGBE_SECTX_MINSECIFG_MASK      0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH            0xFFFFFE00
+
 /*
  * Information about the fdir mode.
  */
@@ -265,11 +270,44 @@ struct ixgbe_filter_info {
 };
 
 /*
+ * Statistics counters collected by the MACsec
+ */
+struct ixgbe_macsec_stats {
+	/* TX port statistics */
+	uint64_t out_pkts_untagged;
+	uint64_t out_pkts_encrypted;
+	uint64_t out_pkts_protected;
+	uint64_t out_octets_encrypted;
+	uint64_t out_octets_protected;
+
+	/* RX port statistics */
+	uint64_t in_pkts_untagged;
+	uint64_t in_pkts_badtag;
+	uint64_t in_pkts_nosci;
+	uint64_t in_pkts_unknownsci;
+	uint64_t in_octets_decrypted;
+	uint64_t in_octets_validated;
+
+	/* RX SC statistics */
+	uint64_t in_pkts_unchecked;
+	uint64_t in_pkts_delayed;
+	uint64_t in_pkts_late;
+
+	/* RX SA statistics */
+	uint64_t in_pkts_ok;
+	uint64_t in_pkts_invalid;
+	uint64_t in_pkts_notvalid;
+	uint64_t in_pkts_unusedsa;
+	uint64_t in_pkts_notusingsa;
+};
+
+/*
  * Structure to store private data for each driver instance (for each port).
  */
 struct ixgbe_adapter {
 	struct ixgbe_hw             hw;
 	struct ixgbe_hw_stats       stats;
+	struct ixgbe_macsec_stats   macsec_stats;
 	struct ixgbe_hw_fdir_info   fdir;
 	struct ixgbe_interrupt      intr;
 	struct ixgbe_stat_mapping_registers stat_mappings;
@@ -300,6 +338,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->stats)
 
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
 #define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->intr)
 
@@ -448,4 +489,8 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 
 int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op, void *arg);
+
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
 #endif /* _IXGBE_ETHDEV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 0bbc583..00013c6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -86,6 +86,7 @@
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
 		PKT_TX_TCP_SEG |		 \
+		PKT_TX_MACSEC |			 \
 		PKT_TX_OUTER_IP_CKSUM)
 
 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -523,6 +524,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 		cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
 	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
 		cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+	if (ol_flags & PKT_TX_MACSEC)
+		cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
 	return cmdtype;
 }
 
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index c2fb826..22bc4c7 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -183,6 +183,106 @@ int
 rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
 
 /**
+ * Enable MACsec offload.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param en
+ *    1 - Enable encryption (encrypt and add integrity signature).
+ *    0 - Disable encryption (only add integrity signature).
+ * @param rp
+ *    1 - Enable replay protection.
+ *    0 - Disable replay protection.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offload.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+
+/**
+ * Configure Tx SC (Secure Connection).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+
+/**
+ * Configure Rx SC (Secure Connection).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac
+ *   The MAC address on the remote side.
+ * @param pi
+ *   The PI (port identifier) on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ */
+int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable Tx SA (Secure Association).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1).
+ * @param an
+ *   The association number on the local side.
+ * @param pn
+ *   The packet number on the local side.
+ * @param key
+ *   The key on the local side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
+ * Enable Rx SA (Secure Association).
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param idx
+ *   The SA to be enabled (0 or 1)
+ * @param an
+ *   The association number on the remote side.
+ * @param pn
+ *   The packet number on the remote side.
+ * @param key
+ *   The key on the remote side.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+		uint32_t pn, uint8_t *key);
+
+/**
  * Response sent back to ixgbe driver from user app after callback
  */
 enum rte_pmd_ixgbe_mb_event_rsp {
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 92434f3..6d68934 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -15,3 +15,14 @@ DPDK_16.11 {
 	rte_pmd_ixgbe_set_vf_vlan_insert;
 	rte_pmd_ixgbe_set_vf_vlan_stripq;
 } DPDK_2.0;
+
+DPDK_17.02 {
+	global:
+
+	rte_pmd_ixgbe_macsec_enable;
+	rte_pmd_ixgbe_macsec_disable;
+	rte_pmd_ixgbe_macsec_config_txsc;
+	rte_pmd_ixgbe_macsec_config_rxsc;
+	rte_pmd_ixgbe_macsec_select_txsa;
+	rte_pmd_ixgbe_macsec_select_rxsa;
+} DPDK_16.11;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v7 5/6] app/testpmd: add MACsec offload commands
  2017-01-13 11:21           ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Tiwei Bie
                               ` (3 preceding siblings ...)
  2017-01-13 11:21             ` [PATCH v7 4/6] net/ixgbe: add MACsec offload support Tiwei Bie
@ 2017-01-13 11:21             ` Tiwei Bie
  2017-01-13 11:21             ` [PATCH v7 6/6] doc: add ixgbe specific APIs Tiwei Bie
                               ` (2 subsequent siblings)
  7 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-13 11:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Below MACsec offload commands are added:

- set macsec offload <port_id> on encrypt on|off replay-protect on|off
- set macsec offload <port_id> off
- set macsec sc tx|rx <port_id> <mac> <pi>
- set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>

Also update the testpmd user guide.
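
For example (illustrative values, consistent with the documentation
added below), enabling the offload and programming the Tx side of
port 0 could look like:

  testpmd> set macsec offload 0 on encrypt on replay-protect on
  testpmd> set macsec sc tx 0 00:00:00:00:00:01 0
  testpmd> set macsec sa tx 0 0 0 0 00112200000000000000000000000000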

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   2 +
 app/test-pmd/macswap.c                      |   2 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   2 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 +++
 6 files changed, 429 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index f768b6b..1bf42ba 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -275,6 +275,18 @@ static void cmd_help_long_parsed(void *parsed_result,
 
 			"set vf mac antispoof (port_id) (vf_id) (on|off).\n"
 			"    Set MAC antispoof for a VF from the PF.\n\n"
+
+			"set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)\n"
+			"    Enable MACsec offload.\n\n"
+
+			"set macsec offload (port_id) off\n"
+			"    Disable MACsec offload.\n\n"
+
+			"set macsec sc (tx|rx) (port_id) (mac) (pi)\n"
+			"    Configure MACsec secure connection (SC).\n\n"
+
+			"set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)\n"
+			"    Configure MACsec secure association (SA).\n\n"
 #endif
 
 			"vlan set strip (on|off) (port_id)\n"
@@ -11488,6 +11500,379 @@ cmdline_parse_inst_t cmd_set_vf_mac_addr = {
 		NULL,
 	},
 };
+
+/* MACsec configuration */
+
+/* Common result structure for MACsec offload enable */
+struct cmd_macsec_offload_on_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t on;
+	cmdline_fixed_string_t encrypt;
+	cmdline_fixed_string_t en_on_off;
+	cmdline_fixed_string_t replay_protect;
+	cmdline_fixed_string_t rp_on_off;
+};
+
+/* Common CLI fields for MACsec offload enable */
+cmdline_parse_token_string_t cmd_macsec_offload_on_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_on_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_on_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 offload, "offload");
+cmdline_parse_token_num_t cmd_macsec_offload_on_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_on_on =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 on, "on");
+cmdline_parse_token_string_t cmd_macsec_offload_on_encrypt =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 encrypt, "encrypt");
+cmdline_parse_token_string_t cmd_macsec_offload_on_en_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 en_on_off, "on#off");
+cmdline_parse_token_string_t cmd_macsec_offload_on_replay_protect =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 replay_protect, "replay-protect");
+cmdline_parse_token_string_t cmd_macsec_offload_on_rp_on_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_on_result,
+		 rp_on_off, "on#off");
+
+static void
+cmd_set_macsec_offload_on_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_on_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+	int en = (strcmp(res->en_on_off, "on") == 0) ? 1 : 0;
+	int rp = (strcmp(res->rp_on_off, "on") == 0) ? 1 : 0;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_on = {
+	.f = cmd_set_macsec_offload_on_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> on "
+		"encrypt on|off replay-protect on|off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_on_set,
+		(void *)&cmd_macsec_offload_on_macsec,
+		(void *)&cmd_macsec_offload_on_offload,
+		(void *)&cmd_macsec_offload_on_port_id,
+		(void *)&cmd_macsec_offload_on_on,
+		(void *)&cmd_macsec_offload_on_encrypt,
+		(void *)&cmd_macsec_offload_on_en_on_off,
+		(void *)&cmd_macsec_offload_on_replay_protect,
+		(void *)&cmd_macsec_offload_on_rp_on_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec offload disable */
+struct cmd_macsec_offload_off_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t offload;
+	uint8_t port_id;
+	cmdline_fixed_string_t off;
+};
+
+/* Common CLI fields for MACsec offload disable */
+cmdline_parse_token_string_t cmd_macsec_offload_off_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_off_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_off_offload =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 offload, "offload");
+cmdline_parse_token_num_t cmd_macsec_offload_off_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_off_off =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_offload_off_result,
+		 off, "off");
+
+static void
+cmd_set_macsec_offload_off_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_offload_off_result *res = parsed_result;
+	int ret;
+	portid_t port_id = res->port_id;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return;
+
+	ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC;
+	ret = rte_pmd_ixgbe_macsec_disable(port_id);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_off = {
+	.f = cmd_set_macsec_offload_off_parsed,
+	.data = NULL,
+	.help_str = "set macsec offload <port_id> off",
+	.tokens = {
+		(void *)&cmd_macsec_offload_off_set,
+		(void *)&cmd_macsec_offload_off_macsec,
+		(void *)&cmd_macsec_offload_off_offload,
+		(void *)&cmd_macsec_offload_off_port_id,
+		(void *)&cmd_macsec_offload_off_off,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure connection configure */
+struct cmd_macsec_sc_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sc;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	struct ether_addr mac;
+	uint16_t pi;
+};
+
+/* Common CLI fields for MACsec secure connection configure */
+cmdline_parse_token_string_t cmd_macsec_sc_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sc_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sc_sc =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 sc, "sc");
+cmdline_parse_token_string_t cmd_macsec_sc_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sc_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 port_id, UINT8);
+cmdline_parse_token_etheraddr_t cmd_macsec_sc_mac =
+	TOKEN_ETHERADDR_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 mac);
+cmdline_parse_token_num_t cmd_macsec_sc_pi =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sc_result,
+		 pi, UINT16);
+
+static void
+cmd_set_macsec_sc_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sc_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_config_txsc(res->port_id,
+				res->mac.addr_bytes) :
+		rte_pmd_ixgbe_macsec_config_rxsc(res->port_id,
+				res->mac.addr_bytes, res->pi);
+	switch (ret) {
+	case 0:
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sc = {
+	.f = cmd_set_macsec_sc_parsed,
+	.data = NULL,
+	.help_str = "set macsec sc tx|rx <port_id> <mac> <pi>",
+	.tokens = {
+		(void *)&cmd_macsec_sc_set,
+		(void *)&cmd_macsec_sc_macsec,
+		(void *)&cmd_macsec_sc_sc,
+		(void *)&cmd_macsec_sc_tx_rx,
+		(void *)&cmd_macsec_sc_port_id,
+		(void *)&cmd_macsec_sc_mac,
+		(void *)&cmd_macsec_sc_pi,
+		NULL,
+	},
+};
+
+/* Common result structure for MACsec secure association configure */
+struct cmd_macsec_sa_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t macsec;
+	cmdline_fixed_string_t sa;
+	cmdline_fixed_string_t tx_rx;
+	uint8_t port_id;
+	uint8_t idx;
+	uint8_t an;
+	uint32_t pn;
+	cmdline_fixed_string_t key;
+};
+
+/* Common CLI fields for MACsec secure association configure */
+cmdline_parse_token_string_t cmd_macsec_sa_set =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 set, "set");
+cmdline_parse_token_string_t cmd_macsec_sa_macsec =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sa_sa =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 sa, "sa");
+cmdline_parse_token_string_t cmd_macsec_sa_tx_rx =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sa_port_id =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 port_id, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_idx =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 idx, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_an =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 an, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_pn =
+	TOKEN_NUM_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 pn, UINT32);
+cmdline_parse_token_string_t cmd_macsec_sa_key =
+	TOKEN_STRING_INITIALIZER
+		(struct cmd_macsec_sa_result,
+		 key, NULL);
+
+static void
+cmd_set_macsec_sa_parsed(
+	void *parsed_result,
+	__attribute__((unused)) struct cmdline *cl,
+	__attribute__((unused)) void *data)
+{
+	struct cmd_macsec_sa_result *res = parsed_result;
+	int ret;
+	int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+	uint8_t key[16] = { 0 };
+	uint8_t xdgt0;
+	uint8_t xdgt1;
+	int key_len;
+	int i;
+
+	key_len = strlen(res->key) / 2;
+	if (key_len > 16)
+		key_len = 16;
+
+	for (i = 0; i < key_len; i++) {
+		xdgt0 = parse_and_check_key_hexa_digit(res->key, (i * 2));
+		if (xdgt0 == 0xFF)
+			return;
+		xdgt1 = parse_and_check_key_hexa_digit(res->key, (i * 2) + 1);
+		if (xdgt1 == 0xFF)
+			return;
+		key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1);
+	}
+
+	ret = is_tx ?
+		rte_pmd_ixgbe_macsec_select_txsa(res->port_id,
+			res->idx, res->an, res->pn, key) :
+		rte_pmd_ixgbe_macsec_select_rxsa(res->port_id,
+			res->idx, res->an, res->pn, key);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		printf("invalid idx %d or an %d\n", res->idx, res->an);
+		break;
+	case -ENODEV:
+		printf("invalid port_id %d\n", res->port_id);
+		break;
+	default:
+		printf("programming error: (%s)\n", strerror(-ret));
+	}
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sa = {
+	.f = cmd_set_macsec_sa_parsed,
+	.data = NULL,
+	.help_str = "set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>",
+	.tokens = {
+		(void *)&cmd_macsec_sa_set,
+		(void *)&cmd_macsec_sa_macsec,
+		(void *)&cmd_macsec_sa_sa,
+		(void *)&cmd_macsec_sa_tx_rx,
+		(void *)&cmd_macsec_sa_port_id,
+		(void *)&cmd_macsec_sa_idx,
+		(void *)&cmd_macsec_sa_an,
+		(void *)&cmd_macsec_sa_pn,
+		(void *)&cmd_macsec_sa_key,
+		NULL,
+	},
+};
 #endif
 
 /* ******************************************************************************** */
@@ -11656,6 +12041,10 @@ cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_split_drop_en,
 	(cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_on,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sc,
+	(cmdline_parse_inst_t *)&cmd_set_macsec_sa,
 #endif
 	NULL,
 };
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index d361db1..cf7eab1 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -113,6 +113,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index f996039..3a09351 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -113,6 +113,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (i = 0; i < nb_rx; i++) {
 		if (likely(i < nb_rx - 1))
 			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 22ce2d6..0a9a1af 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -143,6 +143,8 @@ struct fwd_stream {
 #define TESTPMD_TX_OFFLOAD_INSERT_VLAN       0x0040
 /** Insert double VLAN header in forward engine */
 #define TESTPMD_TX_OFFLOAD_INSERT_QINQ       0x0080
+/** Offload MACsec in forward engine */
+#define TESTPMD_TX_OFFLOAD_MACSEC            0x0100
 
 /** Descriptor for a single flow. */
 struct port_flow {
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index e996f35..8b1a2af 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -215,6 +215,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
 		ol_flags = PKT_TX_VLAN_PKT;
 	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
 		ol_flags |= PKT_TX_QINQ_PKT;
+	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+		ol_flags |= PKT_TX_MACSEC;
 	for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
 		pkt = rte_mbuf_raw_alloc(mbp);
 		if (pkt == NULL) {
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index c611dc5..e3222c0 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -507,6 +507,38 @@ Set mac antispoof for a VF from the PF::
 
    testpmd> set vf mac antispoof  (port_id) (vf_id) (on|off)
 
+set macsec offload
+~~~~~~~~~~~~~~~~~~
+
+Enable/disable MACsec offload::
+
+   testpmd> set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)
+   testpmd> set macsec offload (port_id) off
+
+set macsec sc
+~~~~~~~~~~~~~
+
+Configure MACsec secure connection (SC)::
+
+   testpmd> set macsec sc (tx|rx) (port_id) (mac) (pi)
+
+.. note::
+
+   The pi argument is ignored for tx.
+   Check the NIC Datasheet for hardware limits.
+
+set macsec sa
+~~~~~~~~~~~~~
+
+Configure MACsec secure association (SA)::
+
+   testpmd> set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)
+
+.. note::
+
+   The IDX value must be 0 or 1.
+   Check the NIC Datasheet for hardware limits.
+
 vlan set strip
 ~~~~~~~~~~~~~~
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* [PATCH v7 6/6] doc: add ixgbe specific APIs
  2017-01-13 11:21           ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Tiwei Bie
                               ` (4 preceding siblings ...)
  2017-01-13 11:21             ` [PATCH v7 5/6] app/testpmd: add MACsec offload commands Tiwei Bie
@ 2017-01-13 11:21             ` Tiwei Bie
  2017-01-15 18:19             ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Thomas Monjalon
  2017-01-23  2:30             ` Peng, Yuan
  7 siblings, 0 replies; 79+ messages in thread
From: Tiwei Bie @ 2017-01-13 11:21 UTC (permalink / raw)
  To: dev
  Cc: adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	thomas.monjalon, konstantin.ananyev, helin.zhang, wei.dai,
	xiao.w.wang

Add information about the new ixgbe PMD APIs in the release notes.

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Acked-by: John McNamara <john.mcnamara@intel.com>
---
 doc/guides/rel_notes/release_17_02.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 3958714..a3490a5 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -52,6 +52,11 @@ New Features
   See the :ref:`Generic flow API <Generic_flow_API>` documentation for more
   information.
 
+* **Added APIs for MACsec offload support to the ixgbe PMD.**
+
+  Six new APIs have been added to the ixgbe PMD for MACsec offload support.
+  The declarations for the APIs can be found in ``rte_pmd_ixgbe.h``.
+
 * **Improved offload support in mbuf.**
 
   Added a new Tx mbuf flag for MACsec, which can be set by applications
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 79+ messages in thread

* Re: [PATCH v7 0/6] Add MACsec offload support for ixgbe
  2017-01-13 11:21           ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Tiwei Bie
                               ` (5 preceding siblings ...)
  2017-01-13 11:21             ` [PATCH v7 6/6] doc: add ixgbe specific APIs Tiwei Bie
@ 2017-01-15 18:19             ` Thomas Monjalon
  2017-01-23  2:30             ` Peng, Yuan
  7 siblings, 0 replies; 79+ messages in thread
From: Thomas Monjalon @ 2017-01-15 18:19 UTC (permalink / raw)
  To: Tiwei Bie
  Cc: dev, adrien.mazarguil, wenzhuo.lu, john.mcnamara, olivier.matz,
	konstantin.ananyev, helin.zhang, wei.dai, xiao.w.wang

2017-01-13 19:21, Tiwei Bie:
> This patch set adds the MACsec offload support for ixgbe.
> The testpmd is also updated to support MACsec cmds.

Applied, thanks

Minor updates when applying:
- no need of release notes section for mbuf flag (covered by ixgbe entry)
- squashed release notes patch with ixgbe patch

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v7 2/6] ethdev: add event type for MACsec
  2017-01-13 11:21             ` [PATCH v7 2/6] ethdev: add event type " Tiwei Bie
@ 2017-01-16  2:31               ` Ananyev, Konstantin
  0 siblings, 0 replies; 79+ messages in thread
From: Ananyev, Konstantin @ 2017-01-16  2:31 UTC (permalink / raw)
  To: Bie, Tiwei, dev
  Cc: adrien.mazarguil, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W



> 
> This commit adds the following event type:
> 
> - RTE_ETH_EVENT_MACSEC
> 
> This event will occur when the PN counter in a MACsec connection
> reaches the exhaustion threshold.
> 
> Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
> ---
>  lib/librte_ether/rte_ethdev.h | 1 +
>  1 file changed, 1 insertion(+)
> 
> diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
> index 1c356c1..d5d2956 100644
> --- a/lib/librte_ether/rte_ethdev.h
> +++ b/lib/librte_ether/rte_ethdev.h
> @@ -3188,6 +3188,7 @@ enum rte_eth_event_type {
>  	RTE_ETH_EVENT_INTR_RESET,
>  			/**< reset interrupt event, sent to VF on PF reset */
>  	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
> +	RTE_ETH_EVENT_MACSEC,   /**< MACsec offload related event */
>  	RTE_ETH_EVENT_MAX       /**< max value of this enum */
>  };
> 
> --

Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

> 2.7.4
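
For context, a hedged sketch of how an application could consume this
event, assuming the three-argument callback prototype that
rte_eth_dev_callback_register() took in this timeframe (the callback and
helper names are made up):

    #include <stdio.h>
    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Typically invoked from the host interrupt thread when the PN counter
     * of a MACsec connection crosses the exhaustion threshold; the
     * application would normally react by re-keying. */
    static void
    macsec_event_cb(uint8_t port_id, enum rte_eth_event_type type, void *param)
    {
            (void)param;
            if (type == RTE_ETH_EVENT_MACSEC)
                    printf("port %u: MACsec PN nearly exhausted, re-key needed\n",
                           port_id);
    }

    static void
    register_macsec_event(uint8_t port_id)
    {
            rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_MACSEC,
                                          macsec_event_cb, NULL);
    }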

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v7 3/6] ethdev: add MACsec offload capability flags
  2017-01-13 11:21             ` [PATCH v7 3/6] ethdev: add MACsec offload capability flags Tiwei Bie
@ 2017-01-16  2:33               ` Ananyev, Konstantin
  0 siblings, 0 replies; 79+ messages in thread
From: Ananyev, Konstantin @ 2017-01-16  2:33 UTC (permalink / raw)
  To: Bie, Tiwei, dev
  Cc: adrien.mazarguil, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Zhang, Helin, Dai, Wei, Wang, Xiao W



> 
> If these flags are advertised by a PMD, the NIC supports the MACsec
> offload. Incoming MACsec traffic can be offloaded transparently after
> the MACsec offload is configured correctly by the application, and the
> application can set the PKT_TX_MACSEC flag in mbufs to enable the
> MACsec offload for the packets to be transmitted.
> 
> Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
> ---

Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
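
A rough sketch of the application flow described above, assuming the flag
names introduced earlier in this series (the helper names are made up):

    #include <stdint.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Check whether the port advertises MACsec insertion on Tx. */
    static int
    macsec_tx_supported(uint8_t port_id)
    {
            struct rte_eth_dev_info dev_info;

            rte_eth_dev_info_get(port_id, &dev_info);
            return (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) != 0;
    }

    /* Ask the NIC to apply MACsec to this packet when it is transmitted. */
    static void
    mark_for_macsec(struct rte_mbuf *m)
    {
            m->ol_flags |= PKT_TX_MACSEC;
    }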

^ permalink raw reply	[flat|nested] 79+ messages in thread

* Re: [PATCH v7 0/6] Add MACsec offload support for ixgbe
  2017-01-13 11:21           ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Tiwei Bie
                               ` (6 preceding siblings ...)
  2017-01-15 18:19             ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Thomas Monjalon
@ 2017-01-23  2:30             ` Peng, Yuan
  7 siblings, 0 replies; 79+ messages in thread
From: Peng, Yuan @ 2017-01-23  2:30 UTC (permalink / raw)
  To: Bie, Tiwei, dev
  Cc: adrien.mazarguil, Lu, Wenzhuo, Mcnamara, John, olivier.matz,
	thomas.monjalon, Ananyev, Konstantin, Zhang, Helin, Dai, Wei,
	Wang, Xiao W

Tested-by: Peng Yuan <yuan.peng@intel.com>

- version: 17.02-rc1
- OS/Kernel: 4.5.5-300.fc24.x86_64
- GCC: gcc (GCC) 5.3.1 20151207 (Red Hat 5.3.1-2)
- NIC: 82599

Test cases: 5
Passed: 5

Test steps:

Test Case 1: MACsec packets send and receive
============================================

1. connect the two ixgbe ports with a cable,
   and bind the two ports to the dpdk driver::
 ./tools/dpdk-devbind.py -b igb_uio 07:00.0 07:00.1

2. config the rx port
1). start the testpmd of rx port::
 ./testpmd -c 0xc --socket-mem 1024,1024 --file-prefix=rx -w 0000:07:00.1 \
 -- --port-topology=chained -i --crc-strip

2). set MACsec offload on::
 testpmd>set macsec offload 0 on encrypt on replay-protect on

3). set MACsec parameters as rx_port::
 testpmd>set macsec sc rx 0 00:00:00:00:00:01 0
 testpmd>set macsec sa rx 0 0 0 0 00112200000000000000000000000000

4). set MACsec parameters as tx_port::
 testpmd>set macsec sc tx 0 00:00:00:00:00:02 0
 testpmd>set macsec sa tx 0 0 0 0 00112200000000000000000000000000

5). set rxonly::
 testpmd>set fwd rxonly

6). start::
 testpmd>set promisc all on
 testpmd>start

3. config the tx port
1). start the testpmd of tx port::
 ./testpmd -c 0x30 --socket-mem 1024,1024 --file-prefix=tx -w 0000:07:00.0 \
 -- --port-topology=chained -i --crc-strip --txqflags=0x0

2). set MACsec offload on::
 testpmd>set macsec offload 0 on encrypt on replay-protect on

3). set MACsec parameters as tx_port::
 testpmd>set macsec sc tx 0 00:00:00:00:00:01 0
 testpmd>set macsec sa tx 0 0 0 0 00112200000000000000000000000000

4). set MACsec parameters as rx_port::
 testpmd>set macsec sc rx 0 00:00:00:00:00:02 0
 testpmd>set macsec sa rx 0 0 0 0 00112200000000000000000000000000

5). set txonly::
 testpmd>set fwd txonly

6). start::
 testpmd>start

4. check the result::
 testpmd>stop
 testpmd>show port xstats 0
stop the packet transmitting on tx_port first, then stop the packet receiving
on rx_port.
check the rx data and tx data:
tx_good_packets == rx_good_packets
out_pkts_encrypted == in_pkts_ok == tx_good_packets == rx_good_packets
out_octets_encrypted == in_octets_decrypted
out_octets_protected == in_octets_validated

 if you want to check the content of the packet, use the command::
 testpmd>set verbose 1
the received packets are decrypted.
check the ol_flags:PKT_RX_IP_CKSUM_GOOD
check the content of the packet:
type=0x0800, the ptype of L2,L3,L4: L2_ETHER L3_IPV4 L4_UDP
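
For reference, a rough sketch of checking the same counters from an
application instead of the testpmd CLI, assuming the extended-statistics
API of this timeframe and the xstat names shown above (the helper name is
made up):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <rte_ethdev.h>

    /* Look up one extended statistic by name, e.g. "out_pkts_encrypted"
     * on the tx port or "in_pkts_ok" on the rx port, and return its value. */
    static uint64_t
    xstat_value(uint8_t port_id, const char *name)
    {
            struct rte_eth_xstat *stats = NULL;
            struct rte_eth_xstat_name *names = NULL;
            uint64_t value = 0;
            int i, n;

            n = rte_eth_xstats_get_names(port_id, NULL, 0);
            if (n <= 0)
                    return 0;
            stats = calloc(n, sizeof(*stats));
            names = calloc(n, sizeof(*names));
            if (stats == NULL || names == NULL)
                    goto out;
            rte_eth_xstats_get_names(port_id, names, n);
            rte_eth_xstats_get(port_id, stats, n);
            for (i = 0; i < n; i++) {
                    if (strcmp(names[i].name, name) == 0) {
                            value = stats[i].value;
                            break;
                    }
            }
    out:
            free(stats);
            free(names);
            return value;
    }

With this, the equalities above (e.g. out_pkts_encrypted == in_pkts_ok)
can be asserted directly by a test harness.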


Test Case 2: MACsec packets send and normal receive
===================================================

1. disable MACsec offload on rx port::
 testpmd>set macsec offload 0 off

2. start the packet transfer

3. check the result::
 testpmd>stop
 testpmd>show port xstats 0
stop the testpmd on tx_port first, then stop the testpmd on rx_port.
the received packets are encrypted.
check the content of the packet:
type=0x88e5 sw ptype: L2_ETHER  - l2_len=14 - Receive queue=0x0
you can't find L3 and L4 information in the packet.
in_octets_decrypted and in_octets_validated don't increase during the last data
transfer.


Test Case 3: normal packet send and MACsec receive
==================================================

1. enable MACsec offload on rx port::
 testpmd>set macsec offload 0 on encrypt on replay-protect on

2. disable MACsec offload on tx port::
 testpmd>set macsec offload 0 off

3. start the packet transfer

4. check the result::
 testpmd>stop
 testpmd>show port xstats 0
stop the testpmd on tx_port first, then stop the testpmd on rx_port.
the received packets are not encrypted.
check the content of the packet:
type=0x0800, the ptype of L2,L3,L4: L2_ETHER L3_IPV4 L4_UDP
in_octets_decrypted and out_pkts_encrypted don't increase during the last data
transfer.

Test Case 4: MACsec send and receive with wrong parameters
==========================================================

1. don't add "--txqflags=0x0" to the tx_port command line.
   the MACsec offload can't work, and the tx packets are sent as normal packets.

2. set different pn on rx and tx port, then start the data transfer.

1) set the parameters as test case 1, start and stop the data transfer.
   check the result, rx port can receive and decrypt the packets normally.
2) reset the pn of tx port to 0::
    testpmd>set macsec sa tx 0 0 0 0 00112200000000000000000000000000
   rx port can't receive the packets until the pn of tx port catches up with
   the pn expected by rx port::
    out_pkts_encrypted = in_pkts_late + in_pkts_ok

3. set different keys on rx and tx port, then start the data transfer::
    the RX-packets=0,
    in_octets_decrypted == out_octets_encrypted,
    in_pkts_notvalid == out_pkts_encrypted,
    in_pkts_ok=0,
    rx_good_packets=0

4. set different pi on rx and tx port(reset on rx_port), then start the data
   transfer::
    in_octets_decrypted == out_octets_encrypted,
    in_pkts_ok = 0,
    in_pkts_nosci == out_pkts_encrypted

5. set different an on rx and tx port, then start the data transfer::
    rx_good_packets=0,
    in_octets_decrypted == out_octets_encrypted,
    in_pkts_notusingsa == out_pkts_encrypted,
    in_pkts_ok=0,
    rx_good_packets=0

6. set different index on rx and tx port, then start the data transfer::
    in_octets_decrypted == out_octets_encrypted,
    in_pkts_ok == out_pkts_encrypted

Test Case 5: performance test of MACsec offload packets
==========================================================

1. tx linerate
   port0 connected to IXIA port5, port1 connected to IXIA port6, set port0
   MACsec offload on, set fwd mac::
    ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xc -- -i \
    --port-topology=chained --crc-strip --txqflags=0x0
   on IXIA side, start IXIA port6 transmit, start the IXIA capture.
   view the IXIA port5 captured packet: the protocol is MACsec, the EtherType
   is 0x88E5, and the packet length is 96 bytes, 32 bytes longer than the
   normal packet (a 16-byte SecTAG with SCI plus a 16-byte ICV).
   The valid frame receive rate is 10.78 Mpps, and the %linerate is 100%.

2. rx linerate
   there are three ports 05:00.0 07:00.0 07:00.1. connect 07:00.0 to 07:00.1
   with a cable, connect 05:00.0 to IXIA. bind the three ports to the dpdk driver.
   start two testpmd::
    ./testpmd -c 0x3 --socket-mem 1024,1024 --file-prefix=rx -w 0000:07:00.1 \
    -- --port-topology=chained -i --crc-strip --txqflags=0x0

    testpmd>set macsec offload 0 on encrypt on replay-protect on
    testpmd>set macsec sc rx 0 00:00:00:00:00:01 0
    testpmd>set macsec sa rx 0 0 0 0 00112200000000000000000000000000
    testpmd>set macsec sc tx 0 00:00:00:00:00:02 0
    testpmd>set macsec sa tx 0 0 0 0 00112200000000000000000000000000
    testpmd>set fwd rxonly

    ./testpmd -c 0xc --socket-mem 1024,1024 --file-prefix=tx -b 0000:07:00.1 \
    -- --port-topology=chained -i --crc-strip --txqflags=0x0

    testpmd>set macsec offload 1 on encrypt on replay-protect on
    testpmd>set macsec sc rx 1 00:00:00:00:00:02 0
    testpmd>set macsec sa rx 1 0 0 0 00112200000000000000000000000000
    testpmd>set macsec sc tx 1 00:00:00:00:00:01 0
    testpmd>set macsec sa tx 1 0 0 0 00112200000000000000000000000000
    testpmd>set fwd mac

   issue the start command on both testpmd instances.
   start data transmission from the IXIA port; the frame size is 64 bytes,
   the EtherType is 0x0800, and the rate is 14.88 Mpps.
   check the linerate on rxonly port::
    testpmd>show port stats 0
   It shows "Rx-pps:     10775697", so the rx %linerate is 100%.
   check the MACsec packets number on tx side::
    testpmd>show port xstats 1
   on rx side::
    testpmd>show port xstats 0
   in_pkts_ok == out_pkts_encrypted



Thank you.
Yuan.


-----Original Message-----
From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Tiwei Bie
Sent: Friday, January 13, 2017 7:22 PM
To: dev@dpdk.org
Cc: adrien.mazarguil@6wind.com; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Mcnamara, John <john.mcnamara@intel.com>; olivier.matz@6wind.com; thomas.monjalon@6wind.com; Ananyev, Konstantin <konstantin.ananyev@intel.com>; Zhang, Helin <helin.zhang@intel.com>; Dai, Wei <wei.dai@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
Subject: [dpdk-dev] [PATCH v7 0/6] Add MACsec offload support for ixgbe

This patch set adds the MACsec offload support for ixgbe.
The testpmd is also updated to support MACsec cmds.

v2:
- Update the documents for testpmd;
- Update the release notes;
- Reuse the functions provided by base code;

v3:
- Add the missing parts of MACsec mbuf flag and reorganize the patch set;
- Add an ethdev event type for MACsec;
- Advertise the MACsec offload capabilities based on the mac type;
- Minor fixes and improvements;

v4:
- Reserve bits in mbuf and ethdev for PMD specific API;
- Use the reserved bits in PMD specific API;

v5:
- Add MACsec offload in the NIC feature list;
- Minor improvements on comments;

v6:
- Revert the changes related to the reserved flags;
- Rebase the patch set on the latest branch, xstats code is changed recently;
- Update the feature list when adding the MACsec support for ixgbe;

v7:
- Fix clang build;

Tiwei Bie (6):
  mbuf: add flag for MACsec
  ethdev: add event type for MACsec
  ethdev: add MACsec offload capability flags
  net/ixgbe: add MACsec offload support
  app/testpmd: add MACsec offload commands
  doc: add ixgbe specific APIs

 app/test-pmd/cmdline.c                      | 389 ++++++++++++++++++++++
 app/test-pmd/macfwd.c                       |   2 +
 app/test-pmd/macswap.c                      |   2 +
 app/test-pmd/testpmd.h                      |   2 +
 app/test-pmd/txonly.c                       |   2 +
 doc/guides/nics/features/default.ini        |   1 +
 doc/guides/nics/features/ixgbe.ini          |   1 +
 doc/guides/rel_notes/release_17_02.rst      |  10 +
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  32 ++
 drivers/net/ixgbe/ixgbe_ethdev.c            | 482 +++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h            |  45 +++
 drivers/net/ixgbe/ixgbe_rxtx.c              |   3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h           | 100 ++++++
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map |  11 +
 lib/librte_ether/rte_ethdev.h               |   3 +
 lib/librte_mbuf/rte_mbuf.c                  |   2 +
 lib/librte_mbuf/rte_mbuf.h                  |   6 +
 17 files changed, 1088 insertions(+), 5 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 79+ messages in thread

end of thread, other threads:[~2017-01-23  2:30 UTC | newest]

Thread overview: 79+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-12-03 14:59 [PATCH 0/3] Add MACsec offload support for ixgbe Tiwei Bie
2016-12-03 14:59 ` [PATCH 1/3] lib: add MACsec offload flags Tiwei Bie
2016-12-03 14:59 ` [PATCH 2/3] net/ixgbe: add MACsec offload support Tiwei Bie
2016-12-03 14:59 ` [PATCH 3/3] app/testpmd: add ixgbe " Tiwei Bie
2016-12-16  1:43 ` [PATCH v2 0/4] Add MACsec offload support for ixgbe Tiwei Bie
2016-12-16  1:43   ` [PATCH v2 1/4] lib: add MACsec offload flags Tiwei Bie
2016-12-16  1:43   ` [PATCH v2 2/4] net/ixgbe: add MACsec offload support Tiwei Bie
2016-12-16  1:43   ` [PATCH v2 3/4] app/testpmd: add ixgbe " Tiwei Bie
2016-12-16  1:43   ` [PATCH v2 4/4] doc: add ixgbe specific APIs Tiwei Bie
2016-12-16 10:29     ` Mcnamara, John
2016-12-25 14:57   ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Tiwei Bie
2016-12-25 14:57     ` [PATCH v3 1/6] mbuf: add flag for MACsec Tiwei Bie
2016-12-25 14:57     ` [PATCH v3 2/6] ethdev: add event type " Tiwei Bie
2016-12-25 14:57     ` [PATCH v3 3/6] ethdev: add MACsec offload capability flags Tiwei Bie
2016-12-25 14:57     ` [PATCH v3 4/6] net/ixgbe: add MACsec offload support Tiwei Bie
2016-12-25 14:57     ` [PATCH v3 5/6] app/testpmd: add MACsec offload commands Tiwei Bie
2016-12-25 14:58     ` [PATCH v3 6/6] doc: add ixgbe specific APIs Tiwei Bie
2016-12-26 15:15     ` [PATCH v3 0/6] Add MACsec offload support for ixgbe Adrien Mazarguil
2016-12-27  1:33       ` Tiwei Bie
2016-12-27 14:37         ` Adrien Mazarguil
2016-12-27 17:42           ` Tiwei Bie
2016-12-28 15:41     ` [PATCH v4 0/7] " Tiwei Bie
2016-12-28 15:41       ` [PATCH v4 1/7] mbuf: reserve a Tx offload flag for PMD-specific API Tiwei Bie
2016-12-28 15:41       ` [PATCH v4 2/7] ethdev: reserve an event type " Tiwei Bie
2016-12-28 15:41       ` [PATCH v4 3/7] ethdev: reserve capability flags " Tiwei Bie
2016-12-28 15:41       ` [PATCH v4 4/7] net/ixgbe: add MACsec offload support Tiwei Bie
2016-12-28 15:41       ` [PATCH v4 5/7] app/testpmd: add MACsec offload commands Tiwei Bie
2016-12-28 15:41       ` [PATCH v4 6/7] doc: add ixgbe specific APIs Tiwei Bie
2016-12-28 15:41       ` [PATCH v4 7/7] doc: update the release notes for the reserved flags Tiwei Bie
2017-01-03  6:15       ` [PATCH v4 0/7] Add MACsec offload support for ixgbe Lu, Wenzhuo
2017-01-04  7:21       ` [PATCH v5 0/8] " Tiwei Bie
2017-01-04  7:21         ` [PATCH v5 1/8] mbuf: reserve a Tx offload flag for PMD-specific API Tiwei Bie
2017-01-04  7:21         ` [PATCH v5 2/8] ethdev: reserve an event type " Tiwei Bie
2017-01-04  7:21         ` [PATCH v5 3/8] ethdev: reserve capability flags " Tiwei Bie
2017-01-04 14:21           ` Ananyev, Konstantin
2017-01-04 14:39             ` Tiwei Bie
2017-01-04 15:14               ` Ananyev, Konstantin
2017-01-04 17:00                 ` Tiwei Bie
2017-01-04 17:44                   ` Ananyev, Konstantin
2017-01-04 23:56                     ` Tiwei Bie
2017-01-05  8:33                       ` Adrien Mazarguil
2017-01-05 10:05                         ` Tiwei Bie
2017-01-05 11:32                         ` Ananyev, Konstantin
2017-01-05 18:21                           ` Adrien Mazarguil
2017-01-08 12:39                             ` Ananyev, Konstantin
2017-01-09  3:57                               ` Tiwei Bie
2017-01-09 11:26                                 ` Thomas Monjalon
2017-01-10  2:08                                   ` Tiwei Bie
2017-01-11 17:32                                   ` Olivier MATZ
2017-01-12  2:08                                     ` Tiwei Bie
2017-01-12  2:42                                     ` Yuanhan Liu
2017-01-09 14:41                               ` Adrien Mazarguil
2017-01-04  7:21         ` [PATCH v5 4/8] net/ixgbe: add MACsec offload support Tiwei Bie
2017-01-04  7:21         ` [PATCH v5 5/8] app/testpmd: add MACsec offload commands Tiwei Bie
2017-01-04  7:21         ` [PATCH v5 6/8] doc: add ixgbe specific APIs Tiwei Bie
2017-01-04  7:21         ` [PATCH v5 7/8] doc: update the release notes for the reserved flags Tiwei Bie
2017-01-04  7:21         ` [PATCH v5 8/8] doc: add MACsec offload into NIC feature list Tiwei Bie
2017-01-04  8:29         ` [PATCH v5 0/8] Add MACsec offload support for ixgbe Peng, Yuan
2017-01-11  4:31         ` [PATCH v6 0/6] " Tiwei Bie
2017-01-11  4:31           ` [PATCH v6 1/6] mbuf: add flag for MACsec Tiwei Bie
2017-01-11 17:41             ` Olivier MATZ
2017-01-11  4:31           ` [PATCH v6 2/6] ethdev: add event type " Tiwei Bie
2017-01-11  4:31           ` [PATCH v6 3/6] ethdev: add MACsec offload capability flags Tiwei Bie
2017-01-11  4:31           ` [PATCH v6 4/6] net/ixgbe: add MACsec offload support Tiwei Bie
2017-01-11  4:31           ` [PATCH v6 5/6] app/testpmd: add MACsec offload commands Tiwei Bie
2017-01-13  9:07             ` Thomas Monjalon
2017-01-13  9:16               ` Tiwei Bie
2017-01-11  4:31           ` [PATCH v6 6/6] doc: add ixgbe specific APIs Tiwei Bie
2017-01-13 11:21           ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Tiwei Bie
2017-01-13 11:21             ` [PATCH v7 1/6] mbuf: add flag for MACsec Tiwei Bie
2017-01-13 11:21             ` [PATCH v7 2/6] ethdev: add event type " Tiwei Bie
2017-01-16  2:31               ` Ananyev, Konstantin
2017-01-13 11:21             ` [PATCH v7 3/6] ethdev: add MACsec offload capability flags Tiwei Bie
2017-01-16  2:33               ` Ananyev, Konstantin
2017-01-13 11:21             ` [PATCH v7 4/6] net/ixgbe: add MACsec offload support Tiwei Bie
2017-01-13 11:21             ` [PATCH v7 5/6] app/testpmd: add MACsec offload commands Tiwei Bie
2017-01-13 11:21             ` [PATCH v7 6/6] doc: add ixgbe specific APIs Tiwei Bie
2017-01-15 18:19             ` [PATCH v7 0/6] Add MACsec offload support for ixgbe Thomas Monjalon
2017-01-23  2:30             ` Peng, Yuan
