From: Akhil Goyal <akhil.goyal@nxp.com>
To: <dev@dpdk.org>
Cc: <declan.doherty@intel.com>, <pablo.de.lara.guarch@intel.com>,
	<hemant.agrawal@nxp.com>, <radu.nicolau@intel.com>,
	<borisp@mellanox.com>, <aviadye@mellanox.com>,
	<thomas@monjalon.net>, <sandeep.malik@nxp.com>,
	<jerin.jacob@caviumnetworks.com>, <john.mcnamara@intel.com>,
	<konstantin.ananyev@intel.com>, <shahafs@mellanox.com>,
	<olivier.matz@6wind.com>
Subject: [PATCH v6 08/10] net/ixgbe: enable inline ipsec
Date: Wed, 25 Oct 2017 20:37:25 +0530
Message-ID: <20171025150727.30364-9-akhil.goyal@nxp.com>
In-Reply-To: <20171025150727.30364-1-akhil.goyal@nxp.com>

From: Radu Nicolau <radu.nicolau@intel.com>
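
Add inline IPsec (ESP) offload support to the ixgbe PMD:

- register an rte_security context on the ethdev and implement the
  security operations (session create/destroy, set_pkt_metadata,
  capabilities get) for AES-GCM-128 inline crypto;
- program the device Rx IP/SPI/key tables and the Tx key table as
  security associations are added and removed;
- handle RTE_FLOW_ACTION_TYPE_SECURITY in the flow parser so that
  ingress SAs can be bound through rte_flow rules;
- fill the IPsec fields of the Tx context descriptor and translate the
  Rx descriptor security status bits into PKT_RX_SEC_OFFLOAD and
  PKT_RX_SEC_OFFLOAD_FAILED, in both the scalar and SSE vector paths.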

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
 drivers/net/ixgbe/Makefile             |   2 +-
 drivers/net/ixgbe/base/ixgbe_osdep.h   |   8 +
 drivers/net/ixgbe/ixgbe_ethdev.c       |  11 +
 drivers/net/ixgbe/ixgbe_ethdev.h       |   6 +-
 drivers/net/ixgbe/ixgbe_flow.c         |  47 +++
 drivers/net/ixgbe/ixgbe_ipsec.c        | 737 +++++++++++++++++++++++++++++++++
 drivers/net/ixgbe/ixgbe_ipsec.h        | 151 +++++++
 drivers/net/ixgbe/ixgbe_rxtx.c         |  59 ++-
 drivers/net/ixgbe/ixgbe_rxtx.h         |  11 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c |  57 +++
 10 files changed, 1082 insertions(+), 7 deletions(-)
 create mode 100644 drivers/net/ixgbe/ixgbe_ipsec.c
 create mode 100644 drivers/net/ixgbe/ixgbe_ipsec.h

diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 6a144e7..f03c426 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -120,11 +120,11 @@ SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
 else
 SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c
 endif
-
 ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y)
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
 
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 4aab278..bb5dfd2 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -161,4 +161,12 @@ static inline uint32_t ixgbe_read_addr(volatile void* addr)
 #define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
 	IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
 
+#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms)	\
+do {									\
+	uint32_t cnt = poll_ms;						\
+	IXGBE_WRITE_REG(hw, (reg), (val));				\
+	while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--))	\
+		rte_delay_ms(1);					\
+} while (0)
+
 #endif /* _IXGBE_OS_H_ */
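
For illustration only (not part of the patch), the new helper issues a
register write and then polls the same register until the hardware clears
the given mask bit, giving up after poll_ms milliseconds; a minimal usage
sketch, with hw/idx as placeholders:

	/* trigger an indexed IPsec table write and wait up to 5 ms */
	uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (idx << 3);
	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val,
				       IPSRXIDX_WRITE, 5);

This is the pattern behind the IXGBE_WAIT_* macros in ixgbe_ipsec.c below.
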
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 14b9c53..10bf486 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -61,6 +61,7 @@
 #include <rte_random.h>
 #include <rte_dev.h>
 #include <rte_hash_crc.h>
+#include <rte_security_driver.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -1167,6 +1168,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 		return 0;
 	}
 
+	/* Initialize security_ctx only for the primary process */
+	eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
+	if (eth_dev->security_ctx == NULL)
+		return -ENOMEM;
+
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
@@ -1401,6 +1407,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 	/* Remove all Traffic Manager configuration */
 	ixgbe_tm_conf_uninit(eth_dev);
 
+	rte_free(eth_dev->security_ctx);
+
 	return 0;
 }
 
@@ -3695,6 +3703,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	    hw->mac.type == ixgbe_mac_X550EM_a)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
+	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
 			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index e28c856..f5b52c4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -38,6 +38,7 @@
 #include "base/ixgbe_dcb_82599.h"
 #include "base/ixgbe_dcb_82598.h"
 #include "ixgbe_bypass.h"
+#include "ixgbe_ipsec.h"
 #include <rte_time.h>
 #include <rte_hash.h>
 #include <rte_pci.h>
@@ -486,7 +487,7 @@ struct ixgbe_adapter {
 	struct ixgbe_filter_info    filter;
 	struct ixgbe_l2_tn_info     l2_tn;
 	struct ixgbe_bw_conf        bw_conf;
-
+	struct ixgbe_ipsec          ipsec;
 	bool rx_bulk_alloc_allowed;
 	bool rx_vec_allowed;
 	struct rte_timecounter      systime_tc;
@@ -543,6 +544,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
 	(&((struct ixgbe_adapter *)adapter)->tm_conf)
 
+#define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter) \
+	(&((struct ixgbe_adapter *)adapter)->ipsec)
+
 /*
  * RX/TX function prototypes
  */
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 904c146..13c8243 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -187,6 +187,9 @@ const struct rte_flow_action *next_no_void_action(
  * END
  * other members in mask and spec should set to 0x00.
  * item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
  */
 static int
 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
@@ -226,6 +229,41 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/**
+	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+	 */
+	act = next_no_void_action(actions, NULL);
+	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+		const void *conf = act->conf;
+		/* check if the next not void item is END */
+		act = next_no_void_action(actions, act);
+		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Not supported action.");
+			return -rte_errno;
+		}
+
+		/* get the IP pattern*/
+		item = next_no_void_pattern(pattern, NULL);
+		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+			if (item->last ||
+					item->type == RTE_FLOW_ITEM_TYPE_END) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "IP pattern missing.");
+				return -rte_errno;
+			}
+			item = next_no_void_pattern(pattern, item);
+		}
+
+		filter->proto = IPPROTO_ESP;
+		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+	}
+
 	/* the first not void item can be MAC or IPv4 */
 	item = next_no_void_pattern(pattern, NULL);
 
@@ -519,6 +557,10 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
+	/* An ESP flow is not really an ntuple flow */
+	if (filter->proto == IPPROTO_ESP)
+		return 0;
+
 	/* Ixgbe doesn't support tcp flags. */
 	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -2758,6 +2800,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
 	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
 	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
 			actions, &ntuple_filter, error);
+
+	/* An ESP flow is not really an ntuple flow */
+	if (ntuple_filter.proto == IPPROTO_ESP)
+		return flow;
+
 	if (!ret) {
 		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
 		if (!ret) {
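
For illustration only (not part of the patch), with this parser change an
application can bind an ingress SA through a flow rule whose only action
carries the security session; a minimal sketch, assuming a session already
created with rte_security_session_create() and placeholder ipv4_spec,
sess, port_id and error variables:

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);

The parser above walks the pattern to the IPv4/IPv6 item and programs the
ingress SA from its spec via ixgbe_crypto_add_ingress_sa_from_flow().
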
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
new file mode 100644
index 0000000..99c0a73
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -0,0 +1,737 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_ip.h>
+#include <rte_jhash.h>
+#include <rte_security_driver.h>
+#include <rte_cryptodev.h>
+#include <rte_flow.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_api.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_ipsec.h"
+
+#define RTE_IXGBE_REGISTER_POLL_WAIT_5_MS  5
+
+#define IXGBE_WAIT_RREAD \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
+	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_RWRITE \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSRXIDX, reg_val, \
+	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_TREAD \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
+	IPSRXIDX_READ, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+#define IXGBE_WAIT_TWRITE \
+	IXGBE_WRITE_REG_THEN_POLL_MASK(hw, IXGBE_IPSTXIDX, reg_val, \
+	IPSRXIDX_WRITE, RTE_IXGBE_REGISTER_POLL_WAIT_5_MS)
+
+#define CMP_IP(a, b) (\
+	(a).ipv6[0] == (b).ipv6[0] && \
+	(a).ipv6[1] == (b).ipv6[1] && \
+	(a).ipv6[2] == (b).ipv6[2] && \
+	(a).ipv6[3] == (b).ipv6[3])
+
+
+static void
+ixgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int i = 0;
+
+	/* clear Rx IP table*/
+	for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+		uint16_t index = i << 3;
+		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+		IXGBE_WAIT_RWRITE;
+	}
+
+	/* clear Rx SPI and Rx/Tx SA tables*/
+	for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+		uint32_t index = i << 3;
+		uint32_t reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+		IXGBE_WAIT_RWRITE;
+		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
+		IXGBE_WAIT_RWRITE;
+		reg_val = IPSRXIDX_WRITE | index;
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+		IXGBE_WAIT_TWRITE;
+	}
+}
+
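+/*
+ * Program a security association into the device tables.
+ * An Rx SA takes three indexed writes through IPSRXIDX: the destination
+ * IP table entry (shared between SAs and reference counted), the
+ * SPI -> IP-index mapping, and the key/salt/mode entry. A Tx SA takes a
+ * single key/salt write through IPSTXIDX; its index is saved in the
+ * session and carried per packet in the transmit descriptors.
+ */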
+static int
+ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session)
+{
+	struct rte_eth_dev *dev = ic_session->dev;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_ipsec *priv = IXGBE_DEV_PRIVATE_TO_IPSEC(
+			dev->data->dev_private);
+	uint32_t reg_val;
+	int sa_index = -1;
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		int i, ip_index = -1;
+
+		/* Find a match in the IP table*/
+		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+			if (CMP_IP(priv->rx_ip_tbl[i].ip,
+				   ic_session->dst_ip)) {
+				ip_index = i;
+				break;
+			}
+		}
+		/* If no match, find a free entry in the IP table*/
+		if (ip_index < 0) {
+			for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+				if (priv->rx_ip_tbl[i].ref_count == 0) {
+					ip_index = i;
+					break;
+				}
+			}
+		}
+
+		/* Fail if no match and no free entries*/
+		if (ip_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "No free entry left in the Rx IP table\n");
+			return -1;
+		}
+
+		/* Find a free entry in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->rx_sa_tbl[i].used == 0) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no free entries*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "No free entry left in the Rx SA table\n");
+			return -1;
+		}
+
+		priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
+				ic_session->dst_ip.ipv6[0];
+		priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
+				ic_session->dst_ip.ipv6[1];
+		priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
+				ic_session->dst_ip.ipv6[2];
+		priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
+				ic_session->dst_ip.ipv6[3];
+		priv->rx_ip_tbl[ip_index].ref_count++;
+
+		priv->rx_sa_tbl[sa_index].spi =
+			rte_cpu_to_be_32(ic_session->spi);
+		priv->rx_sa_tbl[sa_index].ip_index = ip_index;
+		priv->rx_sa_tbl[sa_index].key[3] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
+		priv->rx_sa_tbl[sa_index].key[2] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
+		priv->rx_sa_tbl[sa_index].key[1] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
+		priv->rx_sa_tbl[sa_index].key[0] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
+		priv->rx_sa_tbl[sa_index].salt =
+			rte_cpu_to_be_32(ic_session->salt);
+		priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
+		if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION)
+			priv->rx_sa_tbl[sa_index].mode |=
+					(IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
+		if (ic_session->dst_ip.type == IPv6)
+			priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
+		priv->rx_sa_tbl[sa_index].used = 1;
+
+		/* write IP table entry*/
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_IP | (ip_index << 3);
+		if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+					priv->rx_ip_tbl[ip_index].ip.ipv4);
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3),
+					priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
+		}
+		IXGBE_WAIT_RWRITE;
+
+		/* write SPI table entry*/
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_SPI | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
+				priv->rx_sa_tbl[sa_index].spi);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX,
+				priv->rx_sa_tbl[sa_index].ip_index);
+		IXGBE_WAIT_RWRITE;
+
+		/* write Key table entry*/
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE |
+				IPSRXIDX_TABLE_KEY | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0),
+				priv->rx_sa_tbl[sa_index].key[0]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1),
+				priv->rx_sa_tbl[sa_index].key[1]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2),
+				priv->rx_sa_tbl[sa_index].key[2]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3),
+				priv->rx_sa_tbl[sa_index].key[3]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT,
+				priv->rx_sa_tbl[sa_index].salt);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD,
+				priv->rx_sa_tbl[sa_index].mode);
+		IXGBE_WAIT_RWRITE;
+
+	} else { /* IXGBE_OP_AUTHENTICATED_ENCRYPTION (egress) */
+		int i;
+
+		/* Find a free entry in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->tx_sa_tbl[i].used == 0) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no free entries*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "No free entry left in the Tx SA table\n");
+			return -1;
+		}
+
+		priv->tx_sa_tbl[sa_index].spi =
+			rte_cpu_to_be_32(ic_session->spi);
+		priv->tx_sa_tbl[sa_index].key[3] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[0]);
+		priv->tx_sa_tbl[sa_index].key[2] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[4]);
+		priv->tx_sa_tbl[sa_index].key[1] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[8]);
+		priv->tx_sa_tbl[sa_index].key[0] =
+			rte_cpu_to_be_32(*(uint32_t *)&ic_session->key[12]);
+		priv->tx_sa_tbl[sa_index].salt =
+			rte_cpu_to_be_32(ic_session->salt);
+
+		reg_val = IPSRXIDX_RX_EN | IPSRXIDX_WRITE | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0),
+				priv->tx_sa_tbl[sa_index].key[0]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1),
+				priv->tx_sa_tbl[sa_index].key[1]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2),
+				priv->tx_sa_tbl[sa_index].key[2]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3),
+				priv->tx_sa_tbl[sa_index].key[3]);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT,
+				priv->tx_sa_tbl[sa_index].salt);
+		IXGBE_WAIT_TWRITE;
+
+		priv->tx_sa_tbl[sa_index].used = 1;
+		ic_session->sa_index = sa_index;
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_crypto_remove_sa(struct rte_eth_dev *dev,
+		       struct ixgbe_crypto_session *ic_session)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_ipsec *priv =
+			IXGBE_DEV_PRIVATE_TO_IPSEC(dev->data->dev_private);
+	uint32_t reg_val;
+	int sa_index = -1;
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		int i, ip_index = -1;
+
+		/* Find a match in the IP table*/
+		for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+			if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
+				ip_index = i;
+				break;
+			}
+		}
+
+		/* Fail if no match*/
+		if (ip_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "Entry not found in the Rx IP table\n");
+			return -1;
+		}
+
+		/* Find a matching SPI in the Rx SA table */
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->rx_sa_tbl[i].spi ==
+				  rte_cpu_to_be_32(ic_session->spi)) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no match*/
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "Entry not found in the Rx SA table\n");
+			return -1;
+		}
+
+		/* Disable and clear Rx SPI and key table entries */
+		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
+		IXGBE_WAIT_RWRITE;
+		reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_KEY | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, 0);
+		IXGBE_WAIT_RWRITE;
+		priv->rx_sa_tbl[sa_index].used = 0;
+
+		/* If this was the last user, clear the IP table entry too */
+		priv->rx_ip_tbl[ip_index].ref_count--;
+		if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
+			reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_IP |
+					(ip_index << 3);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(0), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(1), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(2), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(3), 0);
+			IXGBE_WAIT_RWRITE;
+		}
+	} else { /* IXGBE_OP_AUTHENTICATED_ENCRYPTION (egress) */
+		int i;
+
+		/* Find a match in the SA table*/
+		for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+			if (priv->tx_sa_tbl[i].spi ==
+				    rte_cpu_to_be_32(ic_session->spi)) {
+				sa_index = i;
+				break;
+			}
+		}
+		/* Fail if no match */
+		if (sa_index < 0) {
+			PMD_DRV_LOG(ERR,
+				    "Entry not found in the Tx SA table\n");
+			return -1;
+		}
+		reg_val = IPSRXIDX_WRITE | (sa_index << 3);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(0), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(1), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(2), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(3), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, 0);
+		IXGBE_WAIT_TWRITE;
+
+		priv->tx_sa_tbl[sa_index].used = 0;
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_crypto_create_session(void *device,
+		struct rte_security_session_conf *conf,
+		struct rte_security_session *session,
+		struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct ixgbe_crypto_session *ic_session = NULL;
+	struct rte_crypto_aead_xform *aead_xform;
+	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+
+	/* Validate the crypto transform before taking an object from the
+	 * mempool so that the error path does not leak session objects.
+	 */
+	if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
+			conf->crypto_xform->aead.algo !=
+					RTE_CRYPTO_AEAD_AES_GCM) {
+		PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
+		return -ENOTSUP;
+	}
+	aead_xform = &conf->crypto_xform->aead;
+
+	if (rte_mempool_get(mempool, (void **)&ic_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
+		return -ENOMEM;
+	}
+
+	if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+			ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION;
+		} else {
+			PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
+			rte_mempool_put(mempool, (void *)ic_session);
+			return -ENOTSUP;
+		}
+	} else {
+		if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+			ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION;
+		} else {
+			PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
+			rte_mempool_put(mempool, (void *)ic_session);
+			return -ENOTSUP;
+		}
+	}
+
+	ic_session->key = aead_xform->key.data;
+	memcpy(&ic_session->salt,
+	       &aead_xform->key.data[aead_xform->key.length], 4);
+	ic_session->spi = conf->ipsec.spi;
+	ic_session->dev = eth_dev;
+
+	set_sec_session_private_data(session, ic_session);
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+		if (ixgbe_crypto_add_sa(ic_session)) {
+			PMD_DRV_LOG(ERR, "Failed to add SA\n");
+			rte_mempool_put(mempool, (void *)ic_session);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+ixgbe_crypto_remove_session(void *device,
+		struct rte_security_session *session)
+{
+	struct rte_eth_dev *eth_dev = device;
+	struct ixgbe_crypto_session *ic_session =
+		(struct ixgbe_crypto_session *)
+		get_sec_session_private_data(session);
+	struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
+
+	if (eth_dev != ic_session->dev) {
+		PMD_DRV_LOG(ERR, "Session not bound to this device\n");
+		return -ENODEV;
+	}
+
+	if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) {
+		PMD_DRV_LOG(ERR, "Failed to remove session\n");
+		return -EFAULT;
+	}
+
+	rte_mempool_put(mempool, (void *)ic_session);
+
+	return 0;
+}
+
+static inline uint8_t
+ixgbe_crypto_compute_pad_len(struct rte_mbuf *m)
+{
+	if (m->nb_segs == 1) {
+		/* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
+		 * payload padding size is stored at <pkt_len - 18>
+		 */
+		uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
+					rte_pktmbuf_pkt_len(m) -
+					(ESP_TRAILER_SIZE + ESP_ICV_SIZE));
+		return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
+	}
+	return 0;
+}
+
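+/*
+ * set_pkt_metadata operation: for egress sessions, stash the SA index,
+ * the computed pad length and the encrypt flag in the mbuf udata64
+ * field, from where the Tx path copies them into the security context
+ * descriptor.
+ */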
+static int
+ixgbe_crypto_update_mb(void *device __rte_unused,
+		struct rte_security_session *session,
+		struct rte_mbuf *m, void *params __rte_unused)
+{
+	struct ixgbe_crypto_session *ic_session =
+			get_sec_session_private_data(session);
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+		union ixgbe_crypto_tx_desc_md *mdata =
+			(union ixgbe_crypto_tx_desc_md *)&m->udata64;
+		mdata->enc = 1;
+		mdata->sa_idx = ic_session->sa_index;
+		mdata->pad_len = ixgbe_crypto_compute_pad_len(m);
+	}
+	return 0;
+}
+
+static const struct rte_security_capability *
+ixgbe_crypto_capabilities_get(void *device __rte_unused)
+{
+	static const struct rte_cryptodev_capabilities
+	aes_gcm_gmac_crypto_capabilities[] = {
+		{	/* AES GMAC (128-bit) */
+			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			{.sym = {
+				.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+				{.auth = {
+					.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+					.block_size = 16,
+					.key_size = {
+						.min = 16,
+						.max = 16,
+						.increment = 0
+					},
+					.digest_size = {
+						.min = 16,
+						.max = 16,
+						.increment = 0
+					},
+					.iv_size = {
+						.min = 12,
+						.max = 12,
+						.increment = 0
+					}
+				}, }
+			}, }
+		},
+		{	/* AES GCM (128-bit) */
+			.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			{.sym = {
+				.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+				{.aead = {
+					.algo = RTE_CRYPTO_AEAD_AES_GCM,
+					.block_size = 16,
+					.key_size = {
+						.min = 16,
+						.max = 16,
+						.increment = 0
+					},
+					.digest_size = {
+						.min = 16,
+						.max = 16,
+						.increment = 0
+					},
+					.aad_size = {
+						.min = 0,
+						.max = 65535,
+						.increment = 1
+					},
+					.iv_size = {
+						.min = 12,
+						.max = 12,
+						.increment = 0
+					}
+				}, }
+			}, }
+		},
+		{
+			.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+			{.sym = {
+				.xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
+			}, }
+		},
+	};
+
+	static const struct rte_security_capability
+	ixgbe_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { 0 }
+			},
+			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { 0 }
+			},
+			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { 0 }
+			},
+			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { 0 }
+			},
+			.crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	return ixgbe_security_capabilities;
+}
+
+int
+ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t reg;
+
+	/* sanity checks */
+	if (dev->data->dev_conf.rxmode.enable_lro) {
+		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
+		return -1;
+	}
+	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+		PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
+		return -1;
+	}
+
+	/* Set IXGBE_SECTXBUFFAF to 0x15 as required in the datasheet*/
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x15);
+
+	/* IFG needs to be set to 3 when we are using security. Otherwise a Tx
+	 * hang will occur with heavy traffic.
+	 */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	reg = (reg & 0xFFFFFFF0) | 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+	reg  = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
+
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
+		reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+		if (reg != 0) {
+			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+			return -1;
+		}
+	}
+	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
+				IXGBE_SECTXCTRL_STORE_FORWARD);
+		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+		if (reg != IXGBE_SECTXCTRL_STORE_FORWARD) {
+			PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+			return -1;
+		}
+	}
+
+	ixgbe_crypto_clear_ipsec_tables(dev);
+
+	return 0;
+}
+
+int
+ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+				      const void *ip_spec,
+				      uint8_t is_ipv6)
+{
+	struct ixgbe_crypto_session *ic_session
+		= get_sec_session_private_data(sess);
+
+	if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) {
+		if (is_ipv6) {
+			const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
+			ic_session->src_ip.type = IPv6;
+			ic_session->dst_ip.type = IPv6;
+			rte_memcpy(ic_session->src_ip.ipv6,
+				   ipv6->hdr.src_addr, 16);
+			rte_memcpy(ic_session->dst_ip.ipv6,
+				   ipv6->hdr.dst_addr, 16);
+		} else {
+			const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
+			ic_session->src_ip.type = IPv4;
+			ic_session->dst_ip.type = IPv4;
+			ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
+			ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
+		}
+		return ixgbe_crypto_add_sa(ic_session);
+	}
+
+	return 0;
+}
+
+static struct rte_security_ops ixgbe_security_ops = {
+	.session_create = ixgbe_crypto_create_session,
+	.session_update = NULL,
+	.session_stats_get = NULL,
+	.session_destroy = ixgbe_crypto_remove_session,
+	.set_pkt_metadata = ixgbe_crypto_update_mb,
+	.capabilities_get = ixgbe_crypto_capabilities_get
+};
+
+struct rte_security_ctx *
+ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
+{
+	struct rte_security_ctx *ctx = rte_malloc("rte_security_instances_ops",
+					sizeof(struct rte_security_ctx), 0);
+	if (ctx) {
+		ctx->device = (void *)dev;
+		ctx->ops = &ixgbe_security_ops;
+		ctx->sess_cnt = 0;
+	}
+	return ctx;
+}
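
For illustration only (not part of the patch), a minimal egress usage
sketch against these ops, assuming the rte_security/ethdev APIs introduced
earlier in this series; port_id, mp and the mbuf m are placeholders:

	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		/* .ipsec (SA parameters) and .crypto_xform (AES-GCM-128
		 * key plus 4 byte salt) filled in by the application
		 */
	};
	struct rte_security_session *sess =
		rte_security_session_create(ctx, &conf, mp);

	/* per egress packet, before rte_eth_tx_burst() */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	rte_security_set_pkt_metadata(ctx, sess, m, NULL);
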
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.h b/drivers/net/ixgbe/ixgbe_ipsec.h
new file mode 100644
index 0000000..fb8fefc
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_ipsec.h
@@ -0,0 +1,151 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef IXGBE_IPSEC_H_
+#define IXGBE_IPSEC_H_
+
+#include <rte_security.h>
+
+#define IPSRXIDX_RX_EN                                    0x00000001
+#define IPSRXIDX_TABLE_IP                                 0x00000002
+#define IPSRXIDX_TABLE_SPI                                0x00000004
+#define IPSRXIDX_TABLE_KEY                                0x00000006
+#define IPSRXIDX_WRITE                                    0x80000000
+#define IPSRXIDX_READ                                     0x40000000
+#define IPSRXMOD_VALID                                    0x00000001
+#define IPSRXMOD_PROTO                                    0x00000004
+#define IPSRXMOD_DECRYPT                                  0x00000008
+#define IPSRXMOD_IPV6                                     0x00000010
+#define IXGBE_ADVTXD_POPTS_IPSEC                          0x00000400
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP                 0x00002000
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN               0x00004000
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP                    0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK                 0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL         0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH           0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED    0x18000000
+
+#define IPSEC_MAX_RX_IP_COUNT           128
+#define IPSEC_MAX_SA_COUNT              1024
+
+#define ESP_ICV_SIZE 16
+#define ESP_TRAILER_SIZE 2
+
+enum ixgbe_operation {
+	IXGBE_OP_AUTHENTICATED_ENCRYPTION,
+	IXGBE_OP_AUTHENTICATED_DECRYPTION
+};
+
+enum ixgbe_gcm_key {
+	IXGBE_GCM_KEY_128,
+	IXGBE_GCM_KEY_256
+};
+
+/**
+ * Generic IP address structure
+ * TODO: Find a better location for this, possibly rte_net.h.
+ */
+struct ipaddr {
+	enum ipaddr_type {
+		IPv4,
+		IPv6
+	} type;
+	/**< IP Address Type - IPv4/IPv6 */
+
+	union {
+		uint32_t ipv4;
+		uint32_t ipv6[4];
+	};
+};
+
+/** inline crypto private session structure */
+struct ixgbe_crypto_session {
+	enum ixgbe_operation op;
+	uint8_t *key;
+	uint32_t salt;
+	uint32_t sa_index;
+	uint32_t spi;
+	struct ipaddr src_ip;
+	struct ipaddr dst_ip;
+	struct rte_eth_dev *dev;
+} __rte_cache_aligned;
+
+struct ixgbe_crypto_rx_ip_table {
+	struct ipaddr ip;
+	uint16_t ref_count;
+};
+
+struct ixgbe_crypto_rx_sa_table {
+	uint32_t spi;
+	uint32_t ip_index;
+	uint32_t key[4];
+	uint32_t salt;
+	uint8_t  mode;
+	uint8_t  used;
+};
+
+struct ixgbe_crypto_tx_sa_table {
+	uint32_t spi;
+	uint32_t key[4];
+	uint32_t salt;
+	uint8_t  used;
+};
+
+union ixgbe_crypto_tx_desc_md {
+	uint64_t data;
+	struct {
+		uint32_t sa_idx;	/**< SA table index */
+		uint8_t pad_len;	/**< ICV and ESP trailer length */
+		uint8_t enc;		/**< enable encryption */
+	};
+};
+
+struct ixgbe_ipsec {
+	struct ixgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
+	struct ixgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
+	struct ixgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
+};
+
+struct rte_security_ctx *
+ixgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
+int ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
+int ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+					  const void *ip_spec,
+					  uint8_t is_ipv6);
+
+#endif /* IXGBE_IPSEC_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 0038dfb..38a014a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -93,6 +93,7 @@
 		PKT_TX_TCP_SEG |		 \
 		PKT_TX_MACSEC |			 \
 		PKT_TX_OUTER_IP_CKSUM |		 \
+		PKT_TX_SEC_OFFLOAD |		 \
 		IXGBE_TX_IEEE1588_TMST)
 
 #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -395,7 +396,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 static inline void
 ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
-		uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
+		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
+		union ixgbe_crypto_tx_desc_md *mdata)
 {
 	uint32_t type_tucmd_mlhl;
 	uint32_t mss_l4len_idx = 0;
@@ -479,6 +481,17 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
 		seqnum_seed |= tx_offload.l2_len
 			       << IXGBE_ADVTXD_TUNNEL_LEN;
 	}
+	if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+		seqnum_seed |=
+			(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & mdata->sa_idx);
+		type_tucmd_mlhl |= mdata->enc ?
+				(IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+				IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
+		type_tucmd_mlhl |=
+			(mdata->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
+		tx_offload_mask.sa_idx |= ~0;
+		tx_offload_mask.sec_pad_len |= ~0;
+	}
 
 	txq->ctx_cache[ctx_idx].flags = ol_flags;
 	txq->ctx_cache[ctx_idx].tx_offload.data[0]  =
@@ -657,6 +670,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint32_t ctx = 0;
 	uint32_t new_ctx;
 	union ixgbe_tx_offload tx_offload;
+	uint8_t use_ipsec;
 
 	tx_offload.data[0] = 0;
 	tx_offload.data[1] = 0;
@@ -684,6 +698,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * are needed for offload functionality.
 		 */
 		ol_flags = tx_pkt->ol_flags;
+		use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/* If hardware offload required */
 		tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
@@ -695,6 +710,13 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
 			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
 			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+			if (use_ipsec) {
+				union ixgbe_crypto_tx_desc_md *ipsec_mdata =
+					(union ixgbe_crypto_tx_desc_md *)
+							&tx_pkt->udata64;
+				tx_offload.sa_idx = ipsec_mdata->sa_idx;
+				tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+			}
 
 			/* If new context need be built or reuse the exist ctx. */
 			ctx = what_advctx_update(txq, tx_ol_req,
@@ -855,7 +877,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				}
 
 				ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-					tx_offload);
+					tx_offload,
+					(union ixgbe_crypto_tx_desc_md *)
+					&tx_pkt->udata64);
 
 				txe->last_id = tx_last;
 				tx_id = txe->next_id;
@@ -873,6 +897,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 
 		olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+		if (use_ipsec)
+			olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
 
 		m_seg = tx_pkt;
 		do {
@@ -1447,6 +1473,12 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 		pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
 	}
 
+	if (rx_status & IXGBE_RXD_STAT_SECP) {
+		pkt_flags |= PKT_RX_SEC_OFFLOAD;
+		if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
+			pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+	}
+
 	return pkt_flags;
 }
 
@@ -2364,8 +2396,10 @@ void __attribute__((cold))
 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
-	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
-			&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
+	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+			(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) &&
+			!(dev->data->dev_conf.txmode.offloads
+					& DEV_TX_OFFLOAD_SECURITY)) {
 		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
 		dev->tx_pkt_prepare = NULL;
 #ifdef RTE_IXGBE_INC_VECTOR
@@ -2535,6 +2569,8 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->txq_flags = tx_conf->txq_flags;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+	txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
+			DEV_TX_OFFLOAD_SECURITY);
 
 	/*
 	 * Modification to set VFTDT for virtual function if vf is detected
@@ -4519,6 +4555,8 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
 
 		rxq->rx_using_sse = rx_using_sse;
+		rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
+				DEV_RX_OFFLOAD_SECURITY);
 	}
 }
 
@@ -5006,6 +5044,19 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
 		ixgbe_setup_loopback_link_82599(hw);
 
+	if ((dev->data->dev_conf.rxmode.offloads &
+			DEV_RX_OFFLOAD_SECURITY) ||
+		(dev->data->dev_conf.txmode.offloads &
+			DEV_TX_OFFLOAD_SECURITY)) {
+		ret = ixgbe_crypto_enable_ipsec(dev);
+		if (ret != 0) {
+			PMD_DRV_LOG(ERR,
+				    "ixgbe_crypto_enable_ipsec fails with %d.",
+				    ret);
+			return ret;
+		}
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 81c527f..4017831 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -138,8 +138,10 @@ struct ixgbe_rx_queue {
 	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
 	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
 	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
-	uint16_t            rx_using_sse;
+	uint8_t            rx_using_sse;
 	/**< indicates that vector RX is in use */
+	uint8_t            using_ipsec;
+	/**< indicates that IPsec RX feature is in use */
 #ifdef RTE_IXGBE_INC_VECTOR
 	uint16_t            rxrearm_nb;     /**< number of remaining to be re-armed */
 	uint16_t            rxrearm_start;  /**< the idx we start the re-arming from */
@@ -183,6 +185,10 @@ union ixgbe_tx_offload {
 		/* fields for TX offloading of tunnels */
 		uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
 		uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
+
+		/* fields for inline IPsec offload */
+		uint64_t sa_idx:8;	/**< TX SA database entry index */
+		uint64_t sec_pad_len:4;	/**< padding length */
 	};
 };
 
@@ -247,6 +253,9 @@ struct ixgbe_tx_queue {
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
 	const struct ixgbe_txq_ops *ops;       /**< txq ops */
 	uint8_t             tx_deferred_start; /**< not in global dev start. */
+	uint8_t		    using_ipsec;
+	/**< indicates that IPsec TX feature is in use */
 };
 
 struct ixgbe_txq_ops {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index e704a7f..b65220f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -123,6 +123,59 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
 }
 
 static inline void
+desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+	__m128i sterr0, sterr1, sterr2, sterr3;
+	__m128i tmp1, tmp2, tmp3, tmp4;
+	__m128i rearm0, rearm1, rearm2, rearm3;
+
+	const __m128i ipsec_sterr_msk = _mm_set_epi32(
+		0, IXGBE_RXDADV_IPSEC_STATUS_SECP |
+			IXGBE_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED,
+		0, 0);
+	const __m128i ipsec_proc_msk  = _mm_set_epi32(
+		0, IXGBE_RXDADV_IPSEC_STATUS_SECP, 0, 0);
+	const __m128i ipsec_err_flag  = _mm_set_epi32(
+		0, PKT_RX_SEC_OFFLOAD_FAILED | PKT_RX_SEC_OFFLOAD,
+		0, 0);
+	const __m128i ipsec_proc_flag = _mm_set_epi32(
+		0, PKT_RX_SEC_OFFLOAD, 0, 0);
+
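+	/* Per descriptor: SECP alone marks the packet as processed by the
+	 * inline engine (PKT_RX_SEC_OFFLOAD); SECP together with the
+	 * authentication-failed error code additionally sets
+	 * PKT_RX_SEC_OFFLOAD_FAILED.
+	 */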
+	rearm0 = _mm_load_si128((__m128i *)&rx_pkts[0]->rearm_data);
+	rearm1 = _mm_load_si128((__m128i *)&rx_pkts[1]->rearm_data);
+	rearm2 = _mm_load_si128((__m128i *)&rx_pkts[2]->rearm_data);
+	rearm3 = _mm_load_si128((__m128i *)&rx_pkts[3]->rearm_data);
+	sterr0 = _mm_and_si128(descs[0], ipsec_sterr_msk);
+	sterr1 = _mm_and_si128(descs[1], ipsec_sterr_msk);
+	sterr2 = _mm_and_si128(descs[2], ipsec_sterr_msk);
+	sterr3 = _mm_and_si128(descs[3], ipsec_sterr_msk);
+	tmp1 = _mm_cmpeq_epi32(sterr0, ipsec_sterr_msk);
+	tmp2 = _mm_cmpeq_epi32(sterr0, ipsec_proc_msk);
+	tmp3 = _mm_cmpeq_epi32(sterr1, ipsec_sterr_msk);
+	tmp4 = _mm_cmpeq_epi32(sterr1, ipsec_proc_msk);
+	sterr0 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
+				_mm_and_si128(tmp2, ipsec_proc_flag));
+	sterr1 = _mm_or_si128(_mm_and_si128(tmp3, ipsec_err_flag),
+				_mm_and_si128(tmp4, ipsec_proc_flag));
+	tmp1 = _mm_cmpeq_epi32(sterr2, ipsec_sterr_msk);
+	tmp2 = _mm_cmpeq_epi32(sterr2, ipsec_proc_msk);
+	tmp3 = _mm_cmpeq_epi32(sterr3, ipsec_sterr_msk);
+	tmp4 = _mm_cmpeq_epi32(sterr3, ipsec_proc_msk);
+	sterr2 = _mm_or_si128(_mm_and_si128(tmp1, ipsec_err_flag),
+				_mm_and_si128(tmp2, ipsec_proc_flag));
+	sterr3 = _mm_or_si128(_mm_and_si128(tmp3, ipsec_err_flag),
+				_mm_and_si128(tmp4, ipsec_proc_flag));
+	rearm0 = _mm_or_si128(rearm0, sterr0);
+	rearm1 = _mm_or_si128(rearm1, sterr1);
+	rearm2 = _mm_or_si128(rearm2, sterr2);
+	rearm3 = _mm_or_si128(rearm3, sterr3);
+	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
+
+static inline void
 desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
 	struct rte_mbuf **rx_pkts)
 {
@@ -310,6 +363,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	volatile union ixgbe_adv_rx_desc *rxdp;
 	struct ixgbe_rx_entry *sw_ring;
 	uint16_t nb_pkts_recd;
+	uint8_t use_ipsec = rxq->using_ipsec;
 	int pos;
 	uint64_t var;
 	__m128i shuf_msk;
@@ -473,6 +527,9 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		/* set ol_flags with vlan packet type */
 		desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
 
+		if (unlikely(use_ipsec))
+			desc_to_olflags_v_ipsec(descs, rx_pkts);
+
 		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
 		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
 		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
-- 
2.9.3
