From: Akhil Goyal <akhil.goyal@nxp.com>
To: <dev@dpdk.org>
Cc: <thomas.monjalon@6wind.com>, <declan.doherty@intel.com>,
	<pablo.de.lara.guarch@intel.com>, <hemant.agrawal@nxp.com>,
	<john.mcnamara@intel.com>, <nhorman@tuxdriver.com>,
	Akhil Goyal <akhil.goyal@nxp.com>
Subject: [PATCH v2 06/11] crypto: Add DPAA2_SEC PMD for NXP DPAA2 platform
Date: Fri, 23 Dec 2016 01:46:55 +0530
Message-ID: <20161222201700.20020-7-akhil.goyal@nxp.com>
In-Reply-To: <20161222201700.20020-1-akhil.goyal@nxp.com>
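
Add a cryptodev poll mode driver for the NXP DPAA2 SEC hardware
accelerator, exposed through the DPSECI object on the fslmc bus.
The driver implements the cryptodev queue-pair and session
operations and supports AES-CBC and 3DES-CBC ciphers, MD5 and
SHA-1/224/256/384/512 HMAC authentication, and chained
cipher-then-auth operations.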

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/bus/fslmc/fslmc_vfio.c              |    3 +-
 drivers/bus/fslmc/rte_fslmc.h               |    5 +-
 drivers/common/dpaa2/dpio/dpaa2_hw_pvt.h    |   25 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 1592 +++++++++++++++++++++++++++
 drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h   |   70 ++
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  368 +++++++
 6 files changed, 2061 insertions(+), 2 deletions(-)
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
 create mode 100644 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h

diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 44cf3d1..ca76584 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -315,7 +315,8 @@ dpaa2_compare_dpaa2_dev(const struct rte_dpaa2_device *dev,
 			 const struct rte_dpaa2_device *dev2)
 {
 	/*not the same family device */
-	if (dev->dev_type != dev2->dev_type)
+	if (dev->dev_type != DPAA2_MC_DPNI_DEVID &&
+			dev->dev_type != DPAA2_MC_DPSECI_DEVID)
 		return -1;
 
 	if (dev->object_id == dev2->object_id)
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index d891933..bc1525f 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -63,7 +63,10 @@ struct rte_dpaa2_driver;
 struct rte_dpaa2_device {
 	TAILQ_ENTRY(rte_dpaa2_device) next; /**< Next probed DPAA2 device. */
 	struct rte_device device;           /**< Inherit core device */
-	struct rte_eth_dev *eth_dev;        /**< ethernet device */
+	union {
+		struct rte_eth_dev *eth_dev;        /**< ethernet device */
+		struct rte_cryptodev *cryptodev;    /**< Crypto Device */
+	};
 	uint16_t dev_type;                  /**< Device Type */
 	uint16_t object_id;             /**< DPAA2 Object ID */
 	struct rte_intr_handle intr_handle; /**< Interrupt handle */
diff --git a/drivers/common/dpaa2/dpio/dpaa2_hw_pvt.h b/drivers/common/dpaa2/dpio/dpaa2_hw_pvt.h
index 6e28d1a..27a4538 100644
--- a/drivers/common/dpaa2/dpio/dpaa2_hw_pvt.h
+++ b/drivers/common/dpaa2/dpio/dpaa2_hw_pvt.h
@@ -137,8 +137,11 @@ struct qbman_fle {
 } while (0)
 #define DPAA2_SET_FD_LEN(fd, length)	(fd)->simple.len = length
 #define DPAA2_SET_FD_BPID(fd, bpid)	((fd)->simple.bpid_offset |= bpid)
+#define DPAA2_SET_FD_IVP(fd)   (((fd)->simple.bpid_offset |= 0x00004000))
 #define DPAA2_SET_FD_OFFSET(fd, offset)	\
 	((fd->simple.bpid_offset |= (uint32_t)(offset) << 16))
+#define DPAA2_SET_FD_INTERNAL_JD(fd, len) \
+	((fd)->simple.frc = (0x80000000 | (len)))
+#define DPAA2_SET_FD_FRC(fd, frc)	((fd)->simple.frc = (frc))
 #define DPAA2_RESET_FD_CTRL(fd)	(fd)->simple.ctrl = 0
 
 #define	DPAA2_SET_FD_ASAL(fd, asal)	((fd)->simple.ctrl |= (asal << 16))
@@ -146,12 +149,32 @@ struct qbman_fle {
 	fd->simple.flc_lo = lower_32_bits((uint64_t)(addr));	\
 	fd->simple.flc_hi = upper_32_bits((uint64_t)(addr));	\
 } while (0)
+#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) ((fle)->frc = (0x80000000 | (len)))
+#define DPAA2_GET_FLE_ADDR(fle)					\
+	(uint64_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
+#define DPAA2_SET_FLE_ADDR(fle, addr) do { \
+	(fle)->addr_lo = lower_32_bits((uint64_t)(addr));     \
+	(fle)->addr_hi = upper_32_bits((uint64_t)(addr));	  \
+} while (0)
+#define DPAA2_SET_FLE_OFFSET(fle, offset) \
+	((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
+#define DPAA2_SET_FLE_BPID(fle, bpid) \
+	((fle)->fin_bpid_offset |= (uint64_t)(bpid))
+#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
+#define DPAA2_SET_FLE_FIN(fle)	((fle)->fin_bpid_offset |= (uint64_t)1 << 31)
+#define DPAA2_SET_FLE_IVP(fle)   (((fle)->fin_bpid_offset |= 0x00004000))
+#define DPAA2_SET_FD_COMPOUND_FMT(fd)	\
+	((fd)->simple.bpid_offset |= (uint32_t)1 << 28)
 #define DPAA2_GET_FD_ADDR(fd)	\
 ((uint64_t)((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
 
 #define DPAA2_GET_FD_LEN(fd)	((fd)->simple.len)
 #define DPAA2_GET_FD_BPID(fd)	(((fd)->simple.bpid_offset & 0x00003FFF))
+#define DPAA2_GET_FD_IVP(fd)   (((fd)->simple.bpid_offset & 0x00004000) >> 14)
 #define DPAA2_GET_FD_OFFSET(fd)	(((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
+#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29)
+#define DPAA2_IS_SET_FLE_SG_EXT(fle)	\
+	(((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
+
 #define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
 	((struct rte_mbuf *)((uint64_t)(buf) - (meta_data_size)))
 
@@ -206,6 +229,7 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
  */
 
 #define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_physaddr)
+#define DPAA2_OP_VADDR_TO_IOVA(op) ((op)->phys_addr)
 
 /**
  * macro to convert Virtual address to IOVA
@@ -226,6 +250,7 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
 #else	/* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
 
 #define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
+#define DPAA2_OP_VADDR_TO_IOVA(op) (op)
 #define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
 #define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
 #define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
new file mode 100644
index 0000000..6c9895f
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -0,0 +1,1592 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *   Copyright (c) 2016 NXP. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <net/if.h>
+#include <rte_mbuf.h>
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_common.h>
+
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+#include <dpaa2_hw_mempool.h>
+#include <mc/fsl_dpseci.h>
+
+#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_logs.h"
+
+/* RTA header files */
+#include <hw/desc/ipsec.h>
+#include <hw/desc/algo.h>
+
+/* Minimum job descriptor consists of a one-word job descriptor HEADER and
+ * a pointer to the shared descriptor.
+ */
+#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
+#define FSL_VENDOR_ID           0x1957
+#define FSL_DEVICE_ID           0x410
+#define FSL_SUBSYSTEM_SEC       1
+#define FSL_MC_DPSECI_DEVID     3
+
+#define NO_PREFETCH 0
+#define TDES_CBC_IV_LEN 8
+#define AES_CBC_IV_LEN 16
+enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
+extern struct dpaa2_bp_info bpid_info[MAX_BPID];
+
+static inline void print_fd(const struct qbman_fd *fd)
+{
+	printf("addr_lo:          %u\n", fd->simple.addr_lo);
+	printf("addr_hi:          %u\n", fd->simple.addr_hi);
+	printf("len:              %u\n", fd->simple.len);
+	printf("bpid:             %u\n", DPAA2_GET_FD_BPID(fd));
+	printf("fi_bpid_off:      %u\n", fd->simple.bpid_offset);
+	printf("frc:              %u\n", fd->simple.frc);
+	printf("ctrl:             %u\n", fd->simple.ctrl);
+	printf("flc_lo:           %u\n", fd->simple.flc_lo);
+	printf("flc_hi:           %u\n\n", fd->simple.flc_hi);
+}
+
+static inline void print_fle(const struct qbman_fle *fle)
+{
+	printf("addr_lo:          %u\n", fle->addr_lo);
+	printf("addr_hi:          %u\n", fle->addr_hi);
+	printf("len:              %u\n", fle->length);
+	printf("fi_bpid_off:      %u\n", fle->fin_bpid_offset);
+	printf("frc:              %u\n", fle->frc);
+}
+
+static inline int build_authenc_fd(dpaa2_sec_session *sess,
+				   struct rte_crypto_op *op,
+		struct qbman_fd *fd, uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sym_op->auth.data.length -
+				sym_op->cipher.data.length;
+	int icv_len = sym_op->auth.digest.length;
+	uint8_t *old_icv;
+	uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* TODO: we are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored.
+	 * So while retrieving we can go back one FLE from the FD ADDR
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+	/* TODO: we can use a mempool to avoid the malloc here */
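+	/* Memory layout of the block allocated below (a sketch; seven
+	 * FLE-sized slots plus the ICV stash, per mem_len above):
+	 *   fle[0]   - FLE holding a backpointer to the crypto op
+	 *   fle[1]   - output frame list entry
+	 *   fle[2]   - input frame list entry
+	 *   fle[3-6] - scatter/gather entries (data, digest, IV)
+	 *   tail     - icv_len bytes to stash the received ICV (DIR_DEC)
+	 */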
+	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		return -1;
+	}
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	fle = fle + 1;
+	sge = fle + 2;
+	if (likely(bpid < MAX_BPID)) {
+		DPAA2_SET_FD_BPID(fd, bpid);
+		DPAA2_SET_FLE_BPID(fle, bpid);
+		DPAA2_SET_FLE_BPID(fle + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge, bpid);
+		DPAA2_SET_FLE_BPID(sge + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge + 2, bpid);
+		DPAA2_SET_FLE_BPID(sge + 3, bpid);
+	} else {
+		DPAA2_SET_FD_IVP(fd);
+		DPAA2_SET_FLE_IVP(fle);
+		DPAA2_SET_FLE_IVP((fle + 1));
+		DPAA2_SET_FLE_IVP(sge);
+		DPAA2_SET_FLE_IVP((sge + 1));
+		DPAA2_SET_FLE_IVP((sge + 2));
+		DPAA2_SET_FLE_IVP((sge + 3));
+	}
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+		   sym_op->auth.data.offset,
+		   sym_op->auth.data.length,
+		   sym_op->auth.digest.length,
+		   sym_op->cipher.data.offset,
+		   sym_op->cipher.data.length,
+		   sym_op->cipher.iv.length,
+		   sym_op->m_src->data_off);
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
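+	/* auth_only_len is the leading region that is authenticated but
+	 * not ciphered (auth range minus cipher range); it is conveyed
+	 * to SEC through the FLE/FD internal job descriptor words.
+	 */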
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+	fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->cipher.data.length + icv_len) :
+			sym_op->cipher.data.length;
+
+	DPAA2_SET_FLE_SG_EXT(fle);
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+				sym_op->m_src->data_off);
+	sge->length = sym_op->cipher.data.length;
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+		sge->length = sym_op->auth.digest.length;
+		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+					sym_op->cipher.iv.length));
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+	fle++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(fle);
+	DPAA2_SET_FLE_FIN(fle);
+	fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->auth.data.length + sym_op->cipher.iv.length) :
+			(sym_op->auth.data.length + sym_op->cipher.iv.length +
+			 sym_op->auth.digest.length);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
+	sge->length = sym_op->cipher.iv.length;
+	sge++;
+
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+				sym_op->m_src->data_off);
+	sge->length = sym_op->auth.data.length;
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,	sym_op->auth.digest.data,
+		       sym_op->auth.digest.length);
+		memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = sym_op->auth.digest.length;
+		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+				 sym_op->auth.digest.length +
+				 sym_op->cipher.iv.length));
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	return 0;
+}
+
+static inline int build_auth_fd(
+		dpaa2_sec_session *sess,
+		struct rte_crypto_op *op,
+		struct qbman_fd *fd,
+		uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct qbman_fle *fle, *sge;
+	uint32_t mem_len = (sess->dir == DIR_ENC) ?
+			   (3 * sizeof(struct qbman_fle)) :
+			   (5 * sizeof(struct qbman_fle) +
+			    sym_op->auth.digest.length);
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+	uint8_t *old_digest;
+
+	PMD_INIT_FUNC_TRACE();
+
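+	/* mem_len sizing (sketch, per the expression above): DIR_ENC needs
+	 * 3 FLEs (op backpointer, output, input); DIR_DEC needs 5 FLEs for
+	 * the extra S/G entries, plus room to stash a copy of the received
+	 * digest for comparison.
+	 */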
+	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
+		return -1;
+	}
+	/* TODO: we are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored.
+	 * So while retrieving we can go back one FLE from the FD ADDR
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	fle = fle + 1;
+
+	if (likely(bpid < MAX_BPID)) {
+		DPAA2_SET_FD_BPID(fd, bpid);
+		DPAA2_SET_FLE_BPID(fle, bpid);
+		DPAA2_SET_FLE_BPID(fle + 1, bpid);
+	} else {
+		DPAA2_SET_FD_IVP(fd);
+		DPAA2_SET_FLE_IVP(fle);
+		DPAA2_SET_FLE_IVP((fle + 1));
+	}
+	flc = &priv->flc_desc[DESC_INITFINAL].flc;
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+	fle->length = sym_op->auth.digest.length;
+
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	fle++;
+
+	if (sess->dir == DIR_ENC) {
+		DPAA2_SET_FLE_ADDR(fle,
+				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
+				     sym_op->m_src->data_off);
+		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
+		fle->length = sym_op->auth.data.length;
+	} else {
+		sge = fle + 2;
+		DPAA2_SET_FLE_SG_EXT(fle);
+		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+
+		if (likely(bpid < MAX_BPID)) {
+			DPAA2_SET_FLE_BPID(sge, bpid);
+			DPAA2_SET_FLE_BPID(sge + 1, bpid);
+		} else {
+			DPAA2_SET_FLE_IVP(sge);
+			DPAA2_SET_FLE_IVP((sge + 1));
+		}
+		DPAA2_SET_FLE_ADDR(sge,
+				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+				     sym_op->m_src->data_off);
+
+		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
+				 sym_op->auth.digest.length);
+		sge->length = sym_op->auth.data.length;
+		sge++;
+		old_digest = (uint8_t *)(sge + 1);
+		rte_memcpy(old_digest, sym_op->auth.digest.data,
+			   sym_op->auth.digest.length);
+		memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+		sge->length = sym_op->auth.digest.length;
+		fle->length = sym_op->auth.data.length +
+				sym_op->auth.digest.length;
+		DPAA2_SET_FLE_FIN(sge);
+	}
+	DPAA2_SET_FLE_FIN(fle);
+
+	return 0;
+}
+
+static int build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+			   struct qbman_fd *fd, uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct qbman_fle *fle, *sge;
+	uint32_t mem_len = (5 * sizeof(struct qbman_fle));
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* TODO: we can use a mempool to avoid the malloc here */
+	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		return -1;
+	}
+	/* TODO: we are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored.
+	 * So while retrieving we can go back one FLE from the FD ADDR
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	fle = fle + 1;
+	sge = fle + 2;
+
+	if (likely(bpid < MAX_BPID)) {
+		DPAA2_SET_FD_BPID(fd, bpid);
+		DPAA2_SET_FLE_BPID(fle, bpid);
+		DPAA2_SET_FLE_BPID(fle + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge, bpid);
+		DPAA2_SET_FLE_BPID(sge + 1, bpid);
+	} else {
+		DPAA2_SET_FD_IVP(fd);
+		DPAA2_SET_FLE_IVP(fle);
+		DPAA2_SET_FLE_IVP((fle + 1));
+		DPAA2_SET_FLE_IVP(sge);
+		DPAA2_SET_FLE_IVP((sge + 1));
+	}
+
+	flc = &priv->flc_desc[0].flc;
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
+			 sym_op->cipher.iv.length);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
+		   sym_op->cipher.data.offset,
+		   sym_op->cipher.data.length,
+		   sym_op->cipher.iv.length,
+		   sym_op->m_src->data_off);
+
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
+			     sym_op->m_src->data_off);
+
+	fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+
+	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
+		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
+
+	fle++;
+
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+	fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+
+	DPAA2_SET_FLE_SG_EXT(fle);
+
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
+	sge->length = sym_op->cipher.iv.length;
+
+	sge++;
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+			     sym_op->m_src->data_off);
+
+	sge->length = sym_op->cipher.data.length;
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(fle);
+
+	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+		   (void *)DPAA2_GET_FD_ADDR(fd),
+		   DPAA2_GET_FD_BPID(fd),
+		   bpid_info[bpid].meta_data_size,
+		   DPAA2_GET_FD_OFFSET(fd),
+		   DPAA2_GET_FD_LEN(fd));
+
+	return 0;
+}
+
+static inline int
+build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+	     struct qbman_fd *fd, uint16_t bpid)
+{
+	int ret = -1;
+
+	PMD_INIT_FUNC_TRACE();
+
+	switch (sess->ctxt_type) {
+	case DPAA2_SEC_CIPHER:
+		ret = build_cipher_fd(sess, op, fd, bpid);
+		break;
+	case DPAA2_SEC_AUTH:
+		ret = build_auth_fd(sess, op, fd, bpid);
+		break;
+	case DPAA2_SEC_CIPHER_HASH:
+		ret = build_authenc_fd(sess, op, fd, bpid);
+		break;
+	case DPAA2_SEC_HASH_CIPHER:
+	default:
+		RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+	}
+	return ret;
+}
+
+static uint16_t
+dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+			uint16_t nb_ops)
+{
+	/* Function to transmit the frames to the given device and VQ */
+	uint32_t loop;
+	int32_t ret;
+	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send;
+	struct qbman_eq_desc eqdesc;
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+	struct qbman_swp *swp;
+	uint16_t num_tx = 0;
+	/* TODO: need to support multiple buffer pools */
+	uint16_t bpid;
+	struct rte_mempool *mb_pool;
+	dpaa2_sec_session *sess;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	if (ops[0]->sym->sess_type != RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+		RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
+		return 0;
+	}
+	/* Prepare enqueue descriptor */
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+	if (!DPAA2_PER_LCORE_SEC_DPIO) {
+		ret = dpaa2_affine_qbman_swp_sec();
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_SEC_PORTAL;
+
+	while (nb_ops) {
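+		/* Enqueue at most MAX_TX_RING_SLOTS frames per iteration:
+		 * (nb_ops >> 3) is nonzero only when nb_ops >= 8, i.e. when
+		 * at least a full burst is pending (this assumes an 8-slot
+		 * TX ring).
+		 */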
+		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
+
+		for (loop = 0; loop < frames_to_send; loop++) {
+			/* Clear the unused FD fields before sending */
+			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+			sess = (dpaa2_sec_session *)
+				(*ops)->sym->session->_private;
+			mb_pool = (*ops)->sym->m_src->pool;
+			bpid = mempool_to_bpid(mb_pool);
+			ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
+			if (ret) {
+				PMD_DRV_LOG(ERR, "error: Improper packet"
+					    " contents for crypto operation\n");
+				goto skip_tx;
+			}
+			ops++;
+		}
+		loop = 0;
+		while (loop < frames_to_send) {
+			loop += qbman_swp_send_multiple(swp, &eqdesc,
+							&fd_arr[loop],
+							frames_to_send - loop);
+		}
+
+		num_tx += frames_to_send;
+		nb_ops -= frames_to_send;
+	}
+skip_tx:
+	dpaa2_qp->tx_vq.tx_pkts += num_tx;
+	dpaa2_qp->tx_vq.err_pkts += nb_ops;
+	return num_tx;
+}
+
+static inline
+struct rte_crypto_op *sec_fd_to_mbuf(
+	const struct qbman_fd *fd)
+{
+	struct qbman_fle *fle;
+	struct rte_crypto_op *op;
+
+	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+	PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
+		   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+
+	/* TODO: we are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored.
+	 * So while retrieving we can go back one FLE from the FD ADDR
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+
+	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
+		/* TODO complete it. */
+		RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?");
+		return NULL;
+	}
+	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
+			DPAA2_GET_FLE_ADDR((fle - 1)));
+
+	/* Prefetch the op */
+	rte_prefetch0(op->sym->m_src);
+
+	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
+		   (void *)op->sym->m_src, op->sym->m_src->buf_addr);
+
+	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+		   (void *)DPAA2_GET_FD_ADDR(fd),
+		   DPAA2_GET_FD_BPID(fd),
+		   bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+		   DPAA2_GET_FD_OFFSET(fd),
+		   DPAA2_GET_FD_LEN(fd));
+
+	/* free the fle memory */
+	rte_free(fle - 1);
+
+	return op;
+}
+
+static uint16_t
+dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+			uint16_t nb_ops)
+{
+	/* Function is responsible for receiving frames for a given
+	 * device and VQ.
+	 */
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+	struct qbman_result *dq_storage;
+	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
+	int ret, num_rx = 0;
+	uint8_t is_last = 0, status;
+	struct qbman_swp *swp;
+	const struct qbman_fd *fd;
+	struct qbman_pull_desc pulldesc;
+
+	if (!DPAA2_PER_LCORE_SEC_DPIO) {
+		ret = dpaa2_affine_qbman_swp_sec();
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_SEC_PORTAL;
+	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+
+	qbman_pull_desc_clear(&pulldesc);
+	qbman_pull_desc_set_numframes(&pulldesc,
+				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
+				      DPAA2_DQRR_RING_SIZE : nb_ops);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
+	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
+				    1);
+
+	/* Issue a volatile dequeue command. */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued. "
+				"QBMAN is busy\n");
+			/* Portal was busy, try again */
+			continue;
+		}
+		break;
+	}
+
+	/* Receive the packets till the Last Dequeue entry is found with
+	 * respect to the above issued PULL command.
+	 */
+	while (!is_last) {
+		/* Check if the previously issued command is completed.
+		 * Note that the SWP is shared between the Ethernet driver
+		 * and the SEC driver.
+		 */
+		while (!qbman_check_command_complete(swp, dq_storage))
+			;
+
+		/* Loop until the dq_storage is updated with
+		 * new token by QBMAN
+		 */
+		while (!qbman_result_has_new_result(swp, dq_storage))
+			;
+		/* Check whether the last pull command has expired and
+		 * set the condition for loop termination.
+		 */
+		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+			is_last = 1;
+			/* Check for valid frame. */
+			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+			if ((status & QBMAN_DQ_STAT_VALIDFRAME) == 0) {
+				PMD_RX_LOG(DEBUG, "No frame is delivered");
+				continue;
+			}
+		}
+
+		fd = qbman_result_DQ_fd(dq_storage);
+		ops[num_rx] = sec_fd_to_mbuf(fd);
+
+		if (unlikely(fd->simple.frc)) {
+			/* TODO Parse SEC errors */
+			RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
+					fd->simple.frc);
+			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		} else {
+			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+
+		num_rx++;
+		dq_storage++;
+	} /* End of Packet Rx loop */
+
+	dpaa2_qp->rx_vq.rx_pkts += num_rx;
+
+	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
+	/* Return the total number of packets received to the DPAA2 app */
+	return num_rx;
+}
+
+/** Release queue pair */
+static int
+dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+	struct dpaa2_sec_qp *qp =
+		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (qp->rx_vq.q_storage) {
+		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
+		rte_free(qp->rx_vq.q_storage);
+	}
+	rte_free(qp);
+
+	dev->data->queue_pairs[queue_pair_id] = NULL;
+
+	return 0;
+}
+
+/** Setup a queue pair */
+static int
+dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+		__rte_unused int socket_id)
+{
+	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+	struct dpaa2_sec_qp *qp;
+	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+	struct dpseci_rx_queue_cfg cfg;
+	int32_t retcode;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* If qp is already set up, there is nothing more to do. */
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		PMD_DRV_LOG(INFO, "QP already setup");
+		return 0;
+	}
+
+	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
+		    dev, qp_id, qp_conf);
+
+	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+
+	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
+			RTE_CACHE_LINE_SIZE);
+	if (!qp) {
+		RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
+		return -1;
+	}
+
+	qp->rx_vq.dev = dev;
+	qp->tx_vq.dev = dev;
+	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
+		sizeof(struct queue_storage_info_t),
+		RTE_CACHE_LINE_SIZE);
+	if (!qp->rx_vq.q_storage) {
+		RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
+		rte_free(qp);
+		return -1;
+	}
+	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
+
+	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
+		RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
+		rte_free(qp->rx_vq.q_storage);
+		rte_free(qp);
+		return -1;
+	}
+
+	dev->data->queue_pairs[qp_id] = qp;
+
+	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
+	cfg.user_ctx = (uint64_t)(&qp->rx_vq);
+	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+				      qp_id, &cfg);
+	return retcode;
+}
+
+/** Start queue pair */
+static int
+dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
+			   __rte_unused uint16_t queue_pair_id)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	return 0;
+}
+
+/** Stop queue pair */
+static int
+dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
+			  __rte_unused uint16_t queue_pair_id)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	return 0;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the dpaa2_sec session structure */
+static unsigned int
+dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	return sizeof(dpaa2_sec_session);
+}
+
+static void
+dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
+			     void *sess __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+}
+
+static int dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
+				 struct rte_crypto_sym_xform *xform,
+		dpaa2_sec_session *session)
+{
+	struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
+	struct alginfo cipherdata;
+	unsigned int bufsize, i;
+	struct ctxt_priv *priv;
+	struct sec_flow_context *flc;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* For SEC CIPHER only one descriptor is required. */
+	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+			RTE_CACHE_LINE_SIZE);
+	if (priv == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+		return -1;
+	}
+
+	flc = &priv->flc_desc[0].flc;
+
+	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+			RTE_CACHE_LINE_SIZE);
+	if (session->cipher_key.data == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for cipher key");
+		rte_free(priv);
+		return -1;
+	}
+	session->cipher_key.length = xform->cipher.key.length;
+
+	memcpy(session->cipher_key.data, xform->cipher.key.data,
+	       xform->cipher.key.length);
+	cipherdata.key = (uint64_t)session->cipher_key.data;
+	cipherdata.keylen = session->cipher_key.length;
+	cipherdata.key_enc_flags = 0;
+	cipherdata.key_type = RTA_DATA_IMM;
+
+	switch (xform->cipher.algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cipherdata.algtype = OP_ALG_ALGSEL_AES;
+		cipherdata.algmode = OP_ALG_AAI_CBC;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+		ctxt->iv.length = AES_CBC_IV_LEN;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+		cipherdata.algmode = OP_ALG_AAI_CBC;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+		ctxt->iv.length = TDES_CBC_IV_LEN;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+	case RTE_CRYPTO_CIPHER_3DES_CTR:
+	case RTE_CRYPTO_CIPHER_AES_GCM:
+	case RTE_CRYPTO_CIPHER_AES_CCM:
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+	case RTE_CRYPTO_CIPHER_3DES_ECB:
+	case RTE_CRYPTO_CIPHER_AES_XTS:
+	case RTE_CRYPTO_CIPHER_AES_F8:
+	case RTE_CRYPTO_CIPHER_ARC4:
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+	case RTE_CRYPTO_CIPHER_NULL:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+			xform->cipher.algo);
+		goto error_out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+			xform->cipher.algo);
+		goto error_out;
+	}
+	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+				DIR_ENC : DIR_DEC;
+
+	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
+					&cipherdata, NULL, ctxt->iv.length,
+			session->dir);
+	flc->dhr = 0;
+	flc->bpv0 = 0x1;
+	flc->mode_bits = 0x8000;
+
+	flc->word1_sdl = (uint8_t)bufsize;
+	flc->word2_rflc_31_0 = lower_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	flc->word3_rflc_63_32 = upper_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	session->ctxt = priv;
+
+	for (i = 0; i < bufsize; i++)
+		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+			    i, priv->flc_desc[0].desc[i]);
+
+	return 0;
+
+error_out:
+	rte_free(session->cipher_key.data);
+	rte_free(priv);
+	return -1;
+}
+
+static int dpaa2_sec_auth_init(struct rte_cryptodev *dev,
+			       struct rte_crypto_sym_xform *xform,
+		dpaa2_sec_session *session)
+{
+	struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
+	struct alginfo authdata;
+	unsigned int bufsize;
+	struct ctxt_priv *priv;
+	struct sec_flow_context *flc;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* For SEC AUTH three descriptors are required for various stages */
+	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+			sizeof(struct ctxt_priv) + 3 *
+			sizeof(struct sec_flc_desc),
+			RTE_CACHE_LINE_SIZE);
+	if (priv == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+		return -1;
+	}
+
+	flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+			RTE_CACHE_LINE_SIZE);
+	if (session->auth_key.data == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for auth key");
+		rte_free(priv);
+		return -1;
+	}
+	session->auth_key.length = xform->auth.key.length;
+
+	memcpy(session->auth_key.data, xform->auth.key.data,
+	       xform->auth.key.length);
+	authdata.key = (uint64_t)session->auth_key.data;
+	authdata.keylen = session->auth_key.length;
+	authdata.key_enc_flags = 0;
+	authdata.key_type = RTA_DATA_IMM;
+
+	switch (xform->auth.algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA1;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_MD5;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA256;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA384;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA512;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA224;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+	case RTE_CRYPTO_AUTH_AES_GCM:
+	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+	case RTE_CRYPTO_AUTH_NULL:
+	case RTE_CRYPTO_AUTH_SHA1:
+	case RTE_CRYPTO_AUTH_SHA256:
+	case RTE_CRYPTO_AUTH_SHA512:
+	case RTE_CRYPTO_AUTH_SHA224:
+	case RTE_CRYPTO_AUTH_SHA384:
+	case RTE_CRYPTO_AUTH_MD5:
+	case RTE_CRYPTO_AUTH_AES_CCM:
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+	case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+			xform->auth.algo);
+		goto error_out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+			xform->auth.algo);
+		goto error_out;
+	}
+	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+				DIR_ENC : DIR_DEC;
+
+	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
+				   1, 0, &authdata, !session->dir,
+				   ctxt->trunc_len);
+
+	flc->word1_sdl = (uint8_t)bufsize;
+	flc->word2_rflc_31_0 = lower_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	flc->word3_rflc_63_32 = upper_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	session->ctxt = priv;
+
+	return 0;
+
+error_out:
+	rte_free(session->auth_key.data);
+	rte_free(priv);
+	return -1;
+}
+
+static int dpaa2_sec_aead_init(struct rte_cryptodev *dev,
+			       struct rte_crypto_sym_xform *xform,
+		dpaa2_sec_session *session)
+{
+	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+	struct alginfo authdata, cipherdata;
+	unsigned int bufsize;
+	struct ctxt_priv *priv;
+	struct sec_flow_context *flc;
+	struct rte_crypto_cipher_xform *cipher_xform;
+	struct rte_crypto_auth_xform *auth_xform;
+	int err;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (session->ext_params.aead_ctxt.auth_cipher_text) {
+		cipher_xform = &xform->cipher;
+		auth_xform = &xform->next->auth;
+		session->ctxt_type =
+			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
+	} else {
+		cipher_xform = &xform->next->cipher;
+		auth_xform = &xform->auth;
+		session->ctxt_type =
+			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
+	}
+	/* For SEC AEAD only one descriptor is required */
+	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+			RTE_CACHE_LINE_SIZE);
+	if (priv == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+		return -1;
+	}
+
+	flc = &priv->flc_desc[0].flc;
+
+	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
+					       RTE_CACHE_LINE_SIZE);
+	if (session->cipher_key.data == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for cipher key");
+		rte_free(priv);
+		return -1;
+	}
+	session->cipher_key.length = cipher_xform->key.length;
+	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
+					     RTE_CACHE_LINE_SIZE);
+	if (session->auth_key.data == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for auth key");
+		goto error_out;
+	}
+	session->auth_key.length = auth_xform->key.length;
+	memcpy(session->cipher_key.data, cipher_xform->key.data,
+	       cipher_xform->key.length);
+	memcpy(session->auth_key.data, auth_xform->key.data,
+	       auth_xform->key.length);
+
+	ctxt->trunc_len = auth_xform->digest_length;
+	authdata.key = (uint64_t)session->auth_key.data;
+	authdata.keylen = session->auth_key.length;
+	authdata.key_enc_flags = 0;
+	authdata.key_type = RTA_DATA_IMM;
+
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA1;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_MD5;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA224;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA256;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA384;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA512;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+	case RTE_CRYPTO_AUTH_AES_GCM:
+	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+	case RTE_CRYPTO_AUTH_NULL:
+	case RTE_CRYPTO_AUTH_SHA1:
+	case RTE_CRYPTO_AUTH_SHA256:
+	case RTE_CRYPTO_AUTH_SHA512:
+	case RTE_CRYPTO_AUTH_SHA224:
+	case RTE_CRYPTO_AUTH_SHA384:
+	case RTE_CRYPTO_AUTH_MD5:
+	case RTE_CRYPTO_AUTH_AES_CCM:
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+	case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+			auth_xform->algo);
+		goto error_out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+			auth_xform->algo);
+		goto error_out;
+	}
+	cipherdata.key = (uint64_t)session->cipher_key.data;
+	cipherdata.keylen = session->cipher_key.length;
+	cipherdata.key_enc_flags = 0;
+	cipherdata.key_type = RTA_DATA_IMM;
+
+	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cipherdata.algtype = OP_ALG_ALGSEL_AES;
+		cipherdata.algmode = OP_ALG_AAI_CBC;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+		ctxt->iv.length = AES_CBC_IV_LEN;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+		cipherdata.algmode = OP_ALG_AAI_CBC;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+		ctxt->iv.length = TDES_CBC_IV_LEN;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_GCM:
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+	case RTE_CRYPTO_CIPHER_NULL:
+	case RTE_CRYPTO_CIPHER_3DES_ECB:
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+	case RTE_CRYPTO_CIPHER_AES_CCM:
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+			cipher_xform->algo);
+		goto error_out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+			cipher_xform->algo);
+		goto error_out;
+	}
+	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+				DIR_ENC : DIR_DEC;
+
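+	/* Inline-key probe (sketch): stash the two key lengths in
+	 * desc[0]/desc[1] and let rta_inline_query() report in desc[2]
+	 * a bitmask of the keys that fit inline in the shared
+	 * descriptor: bit 0 for the cipher key, bit 1 for the auth key.
+	 * Keys that do not fit are referenced by pointer instead.
+	 */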
+	priv->flc_desc[0].desc[0] = cipherdata.keylen;
+	priv->flc_desc[0].desc[1] = authdata.keylen;
+	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+			MIN_JOB_DESC_SIZE,
+			(unsigned int *)priv->flc_desc[0].desc,
+			&priv->flc_desc[0].desc[2], 2);
+
+	if (err < 0) {
+		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
+		goto error_out;
+	}
+	if (priv->flc_desc[0].desc[2] & 1) {
+		cipherdata.key_type = RTA_DATA_IMM;
+	} else {
+		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+		cipherdata.key_type = RTA_DATA_PTR;
+	}
+	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
+		authdata.key_type = RTA_DATA_IMM;
+	} else {
+		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
+		authdata.key_type = RTA_DATA_PTR;
+	}
+	priv->flc_desc[0].desc[0] = 0;
+	priv->flc_desc[0].desc[1] = 0;
+	priv->flc_desc[0].desc[2] = 0;
+
+	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
+		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
+					      0, &cipherdata, &authdata,
+					      ctxt->iv.length,
+					      ctxt->auth_only_len,
+					      ctxt->trunc_len,
+					      session->dir);
+	} else {
+		RTE_LOG(ERR, PMD, "Hash before cipher not supported");
+		goto error_out;
+	}
+
+	flc->word1_sdl = (uint8_t)bufsize;
+	flc->word2_rflc_31_0 = lower_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	flc->word3_rflc_63_32 = upper_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	session->ctxt = priv;
+
+	return 0;
+
+error_out:
+	rte_free(session->cipher_key.data);
+	rte_free(session->auth_key.data);
+	rte_free(priv);
+	return -1;
+}
+
+static void *
+dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+			    struct rte_crypto_sym_xform *xform,	void *sess)
+{
+	dpaa2_sec_session *session = sess;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (unlikely(sess == NULL)) {
+		RTE_LOG(ERR, PMD, "invalid session struct");
+		return NULL;
+	}
+	/* Cipher Only */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+		session->ctxt_type = DPAA2_SEC_CIPHER;
+		dpaa2_sec_cipher_init(dev, xform, session);
+
+	/* Authentication Only */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		   xform->next == NULL) {
+		session->ctxt_type = DPAA2_SEC_AUTH;
+		dpaa2_sec_auth_init(dev, xform, session);
+
+	/* Cipher then Authenticate */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		session->ext_params.aead_ctxt.auth_cipher_text = true;
+		dpaa2_sec_aead_init(dev, xform, session);
+
+	/* Authenticate then Cipher */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		session->ext_params.aead_ctxt.auth_cipher_text = false;
+		dpaa2_sec_aead_init(dev, xform, session);
+	} else {
+		RTE_LOG(ERR, PMD, "Invalid crypto type");
+		return NULL;
+	}
+
+	return session;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+dpaa2_sec_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	if (sess)
+		memset(sess, 0, sizeof(dpaa2_sec_session));
+}
+
+static int
+dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	return -ENOTSUP;
+}
+
+static int
+dpaa2_sec_dev_start(struct rte_cryptodev *dev)
+{
+	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+	struct dpseci_attr attr;
+	struct dpaa2_queue *dpaa2_q;
+	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+					dev->data->queue_pairs;
+	struct dpseci_rx_queue_attr rx_attr;
+	struct dpseci_tx_queue_attr tx_attr;
+	int ret, i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	memset(&attr, 0, sizeof(struct dpseci_attr));
+
+	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
+			     priv->hw_id);
+		goto get_attr_failure;
+	}
+	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
+	if (ret) {
+		PMD_INIT_LOG(ERR,
+			     "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
+		goto get_attr_failure;
+	}
+	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
+		dpaa2_q = &qp[i]->rx_vq;
+		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+				    &rx_attr);
+		dpaa2_q->fqid = rx_attr.fqid;
+		PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
+	}
+	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
+		dpaa2_q = &qp[i]->tx_vq;
+		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+				    &tx_attr);
+		dpaa2_q->fqid = tx_attr.fqid;
+		PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
+	}
+
+	return 0;
+get_attr_failure:
+	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+	return -1;
+}
+
+static void
+dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
+{
+	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
+			     priv->hw_id);
+		return;
+	}
+
+	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
+	if (ret < 0) {
+		PMD_INIT_LOG(ERR, "SEC Device cannot be reset: Error = %x\n",
+			     ret);
+		return;
+	}
+}
+
+static int
+dpaa2_sec_dev_close(struct rte_cryptodev *dev)
+{
+	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Function is reverse of dpaa2_sec_dev_init.
+	 * It does the following:
+	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
+	 * 2. Close the DPSECI device
+	 * 3. Free the allocated resources.
+	 */
+
+	/* Close the device at the underlying layer */
+	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
+			     " error code %d\n", ret);
+		return -1;
+	}
+
+	/* Free the allocated memory for the dpseci object */
+	priv->hw = NULL;
+	rte_free(dpseci);
+
+	return 0;
+}
+
+static void
+dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
+			struct rte_cryptodev_info *info)
+{
+	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+	if (info != NULL) {
+		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		info->feature_flags = dev->feature_flags;
+		info->capabilities = dpaa2_sec_capabilities;
+		info->sym.max_nb_sessions = internals->max_nb_sessions;
+		info->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+	}
+}
+
+static struct rte_cryptodev_ops crypto_ops = {
+	.dev_configure	      = dpaa2_sec_dev_configure,
+	.dev_start	      = dpaa2_sec_dev_start,
+	.dev_stop	      = dpaa2_sec_dev_stop,
+	.dev_close	      = dpaa2_sec_dev_close,
+	.dev_infos_get        = dpaa2_sec_dev_infos_get,
+	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
+	.queue_pair_release   = dpaa2_sec_queue_pair_release,
+	.queue_pair_start     = dpaa2_sec_queue_pair_start,
+	.queue_pair_stop      = dpaa2_sec_queue_pair_stop,
+	.queue_pair_count     = dpaa2_sec_queue_pair_count,
+	.session_get_size     = dpaa2_sec_session_get_size,
+	.session_initialize   = dpaa2_sec_session_initialize,
+	.session_configure    = dpaa2_sec_session_configure,
+	.session_clear        = dpaa2_sec_session_clear,
+};
+
+static int
+dpaa2_sec_uninit(__attribute__((unused))
+		 const struct rte_cryptodev_driver *crypto_drv,
+		 struct rte_cryptodev *dev)
+{
+	if (dev->data->name == NULL)
+		return -EINVAL;
+
+	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
+		     dev->data->name, rte_socket_id());
+
+	return 0;
+}
+
+static int
+dpaa2_sec_dev_init(__attribute__((unused))
+		   struct rte_cryptodev_driver *crypto_drv,
+		   struct rte_cryptodev *cryptodev)
+{
+	struct dpaa2_sec_dev_private *internals;
+	struct rte_device *dev = cryptodev->device;
+	struct rte_dpaa2_device *dpaa2_dev;
+	struct fsl_mc_io *dpseci;
+	uint16_t token;
+	struct dpseci_attr attr;
+	int retcode, hw_id;
+
+	PMD_INIT_FUNC_TRACE();
+	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
+	if (dpaa2_dev == NULL) {
+		PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
+		return -1;
+	}
+	hw_id = dpaa2_dev->object_id;
+
+	cryptodev->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+	cryptodev->dev_ops = &crypto_ops;
+
+	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
+	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
+	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+	internals = cryptodev->data->dev_private;
+	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
+
+	/*
+	 * For secondary processes, we don't initialise any further as primary
+	 * has already done this work. Only check we don't need a different
+	 * RX function
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+		return 0;
+	}
+	/* Open the rte device via MC and save the handle for further use */
+	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
+				sizeof(struct fsl_mc_io), 0);
+	if (!dpseci) {
+		PMD_INIT_LOG(ERR,
+			     "Error in allocating the memory for dpsec object");
+		return -1;
+	}
+	dpseci->regs = mcp_ptr_list[0];
+
+	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
+	if (retcode != 0) {
+		PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
+			     retcode);
+		goto init_error;
+	}
+	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
+	if (retcode != 0) {
+		PMD_INIT_LOG(ERR,
+			     "Cannot get dpsec device attributes: Error = %x",
+			     retcode);
+		goto init_error;
+	}
+	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
+		 "dpsec-%u", hw_id);
+
+	internals->max_nb_queue_pairs = attr.num_tx_queues;
+	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
+	internals->hw = dpseci;
+	internals->token = token;
+
+	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+	return 0;
+
+init_error:
+	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+
+	/* dpaa2_sec_uninit(crypto_dev_name); */
+	return -EFAULT;
+}
+
+static int
+cryptodev_dpaa2_sec_probe(struct rte_driver *r_drv __rte_unused,
+			  struct rte_device *r_dev)
+{
+	struct rte_cryptodev *cryptodev;
+	struct rte_dpaa2_device *dpaa2_dev;
+
+	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+	int retval;
+
+	dpaa2_dev = container_of(r_dev, struct rte_dpaa2_device, device);
+
+	strcpy(cryptodev_name, "dpaa2_sec_device");
+
+	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
+	if (cryptodev == NULL)
+		return -ENOMEM;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		cryptodev->data->dev_private = rte_zmalloc_socket(
+					"cryptodev private structure",
+					sizeof(struct dpaa2_sec_dev_private),
+					RTE_CACHE_LINE_SIZE,
+					rte_socket_id());
+
+		if (cryptodev->data->dev_private == NULL)
+			rte_panic("Cannot allocate memzone for private "
+					"device data");
+	}
+
+	dpaa2_dev->cryptodev = cryptodev;
+
+	cryptodev->device = r_dev;
+
+	/* init user callbacks */
+	TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+	/* Invoke PMD device initialization function */
+	retval = dpaa2_sec_dev_init(NULL, cryptodev);
+	if (retval == 0)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_free(cryptodev->data->dev_private);
+
+	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
+
+	return -ENXIO;
+}
+
+static int
+cryptodev_dpaa2_sec_remove(struct rte_device *r_dev)
+{
+	struct rte_cryptodev *cryptodev;
+	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	int ret;
+
+	if (r_dev == NULL)
+		return -EINVAL;
+
+	strcpy(cryptodev_name, "dpaa2_sec_device");
+	cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	ret = dpaa2_sec_uninit(NULL, cryptodev);
+	if (ret)
+		return ret;
+
+	/* free crypto device */
+	rte_cryptodev_pmd_release_device(cryptodev);
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_free(cryptodev->data->dev_private);
+
+	cryptodev->pci_dev = NULL;
+	cryptodev->driver = NULL;
+	cryptodev->data = NULL;
+
+	return 0;
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
+	.driver = {
+		.probe = cryptodev_dpaa2_sec_probe,
+		.remove = cryptodev_dpaa2_sec_remove,
+	},
+	.drv_type = DPAA2_MC_DPSECI_DEVID,
+	.drv_flags = 0,
+};
+
+RTE_PMD_REGISTER_DPAA2(dpaa2_sec_pmd, rte_dpaa2_sec_driver);
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
new file mode 100644
index 0000000..03d4c70
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -0,0 +1,70 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *   Copyright (c) 2016 NXP. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_SEC_LOGS_H_
+#define _DPAA2_SEC_LOGS_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
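+
+/*
+ * Usage sketch for the macros above (illustrative only; 'ret' and
+ * 'qp_id' are hypothetical locals):
+ *
+ *	PMD_INIT_FUNC_TRACE();
+ *	PMD_INIT_LOG(ERR, "dpseci open failed: %d", ret);
+ *	PMD_RX_LOG(DEBUG, "dequeued %d ops", ret);
+ *	PMD_DRV_LOG(DEBUG, "qp %u configured", qp_id);
+ *
+ * Each expands to RTE_LOG(level, PMD, ...) prefixed with the calling
+ * function name; the FUNC_TRACE/RX/TX/DRIVER variants compile to
+ * nothing unless the matching RTE_LIBRTE_DPAA2_SEC_DEBUG_* option is
+ * enabled.
+ */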
+
+#endif /* _DPAA2_SEC_LOGS_H_ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
new file mode 100644
index 0000000..f2529fe
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -0,0 +1,368 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *   Copyright (c) 2016 NXP. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+#define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+
+#define MAX_QUEUES		64
+#define MAX_DESC_SIZE		64
+/** private data structure for each DPAA2_SEC device */
+struct dpaa2_sec_dev_private {
+	void *mc_portal; /**< MC Portal for configuring this device */
+	void *hw; /**< Hardware handle for this device. Used by NADK framework */
+	int32_t hw_id; /**< A unique ID of this device instance */
+	int32_t vfio_fd; /**< File descriptor received via VFIO */
+	uint16_t token; /**< Token required by DPxxx objects */
+	unsigned int max_nb_queue_pairs;
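+	/**< Max number of queue pairs supported by device */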
+
+	unsigned int max_nb_sessions;
+	/**< Max number of sessions supported by device */
+};
+
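+/** DPAA2 SEC queue pair: a pair of DPAA2 queues used to submit crypto
+ * operations to the SEC engine and to fetch processed results back.
+ */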
+struct dpaa2_sec_qp {
+	struct dpaa2_queue rx_vq;
+	struct dpaa2_queue tx_vq;
+};
+
+enum shr_desc_type {
+	DESC_UPDATE,
+	DESC_FINAL,
+	DESC_INITFINAL,
+};
+
+#define DIR_ENC                 1
+#define DIR_DEC                 0
+
+/* SEC Flow Context Descriptor */
+struct sec_flow_context {
+	/* word 0 */
+	uint16_t word0_sdid;		/* 11-0  SDID */
+	uint16_t word0_res;		/* 31-12 reserved */
+
+	/* word 1 */
+	uint8_t word1_sdl;		/* 5-0 SDL */
+					/* 7-6 reserved */
+
+	uint8_t word1_bits_15_8;        /* 11-8 CRID */
+					/* 14-12 reserved */
+					/* 15 CRJD */
+
+	uint8_t word1_bits23_16;	/* 16  EWS */
+					/* 17 DAC */
+					/* 18,19,20 ? */
+					/* 23-21 reserved */
+
+	uint8_t word1_bits31_24;	/* 24 RSC */
+					/* 25 RBMT */
+					/* 31-26 reserved */
+
+	/* word 2  RFLC[31-0] */
+	uint32_t word2_rflc_31_0;
+
+	/* word 3  RFLC[63-32] */
+	uint32_t word3_rflc_63_32;
+
+	/* word 4 */
+	uint16_t word4_iicid;		/* 15-0  IICID */
+	uint16_t word4_oicid;		/* 31-16 OICID */
+
+	/* word 5 */
+	uint32_t word5_ofqid:24;		/* 23-0 OFQID */
+	uint32_t word5_31_24:8;
+					/* 24 OSC */
+					/* 25 OBMT */
+					/* 29-26 reserved */
+					/* 31-30 ICR */
+
+	/* word 6 */
+	uint32_t word6_oflc_31_0;
+
+	/* word 7 */
+	uint32_t word7_oflc_63_32;
+
+	/* Word 8-15 storage profiles */
+	uint16_t dl;			/**< Data length (correction) */
+	uint16_t reserved;		/**< reserved */
+	uint16_t dhr;			/**< Data head room (correction) */
+	uint16_t mode_bits;		/**< mode bits */
+	uint16_t bpv0;			/**< buffer pool0 valid */
+	uint16_t bpid0;			/**< buffer pool0 id */
+	uint16_t bpv1;			/**< buffer pool1 valid */
+	uint16_t bpid1;			/**< buffer pool1 id */
+	uint64_t word_12_15[2];		/**< word 12-15 are reserved */
+};
+
+struct sec_flc_desc {
+	struct sec_flow_context flc;
+	uint32_t desc[MAX_DESC_SIZE];
+};
+
+struct ctxt_priv {
+	struct sec_flc_desc flc_desc[0];
+};
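+/*
+ * flc_desc[0] is a flexible-array-style member: the context private
+ * area is allocated with room for as many flow-context descriptors as
+ * the session needs. Allocation sketch (illustrative only; 'n' is a
+ * hypothetical descriptor count):
+ *
+ *	struct ctxt_priv *priv = rte_zmalloc(NULL,
+ *		sizeof(struct ctxt_priv) + n * sizeof(struct sec_flc_desc),
+ *		RTE_CACHE_LINE_SIZE);
+ */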
+
+enum dpaa2_sec_op_type {
+	DPAA2_SEC_NONE,  /*!< No Cipher operations */
+	DPAA2_SEC_CIPHER,/*!< CIPHER operations */
+	DPAA2_SEC_AUTH,  /*!< Authentication Operations */
+	DPAA2_SEC_CIPHER_HASH,  /*!< Cipher then hash (authenticated
+				 * encryption with associated data)
+				 */
+	DPAA2_SEC_HASH_CIPHER,  /*!< Hash then cipher (authentication
+				 * followed by encryption)
+				 */
+	DPAA2_SEC_IPSEC, /*!< IPSEC protocol operations */
+	DPAA2_SEC_PDCP,  /*!< PDCP protocol operations */
+	DPAA2_SEC_PKC,   /*!< Public Key Cryptographic Operations */
+	DPAA2_SEC_MAX
+};
+
+struct dpaa2_sec_cipher_ctxt {
+	struct {
+		uint8_t *data;
+		uint16_t length;
+	} iv;	/**< Initialisation vector parameters */
+	uint8_t *init_counter;  /*!< Set initial counter for CTR mode */
+};
+
+struct dpaa2_sec_auth_ctxt {
+	uint8_t trunc_len;              /*!< Length for output ICV, should
+					 * be 0 if no truncation required
+					 */
+};
+
+struct dpaa2_sec_aead_ctxt {
+	struct {
+		uint8_t *data;
+		uint16_t length;
+	} iv;	/**< Initialisation vector parameters */
+	uint16_t auth_only_len; /*!< Length of data for Auth only */
+	uint8_t auth_cipher_text;       /**< Authenticate/cipher ordering */
+	uint8_t trunc_len;              /*!< Length for output ICV, should
+					 * be 0 if no truncation required
+					 */
+};
+
+typedef struct dpaa2_sec_session_entry {
+	void *ctxt;
+	uint8_t ctxt_type;
+	uint8_t dir;         /*!< Operation Direction */
+	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+	struct {
+		uint8_t *data;	/**< pointer to key data */
+		size_t length;	/**< key length in bytes */
+	} cipher_key;
+	struct {
+		uint8_t *data;	/**< pointer to key data */
+		size_t length;	/**< key length in bytes */
+	} auth_key;
+	uint8_t status;
+	union {
+		struct dpaa2_sec_cipher_ctxt cipher_ctxt;
+		struct dpaa2_sec_auth_ctxt auth_ctxt;
+		struct dpaa2_sec_aead_ctxt aead_ctxt;
+	} ext_params;
+} dpaa2_sec_session;
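+/*
+ * Session population sketch (illustrative only; 'sess' and 'key' are
+ * hypothetical locals and error handling is omitted):
+ *
+ *	dpaa2_sec_session *sess = ...;
+ *	sess->ctxt_type = DPAA2_SEC_CIPHER;
+ *	sess->dir = DIR_ENC;
+ *	sess->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ *	sess->cipher_key.data = key;
+ *	sess->cipher_key.length = 16;
+ */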
+
+static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.aad_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 28,
+					.max = 28,
+					.increment = 0
+				},
+				.aad_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.aad_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 128,
+					.max = 128,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.aad_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 128,
+					.max = 128,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.aad_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 24,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
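+
+/*
+ * The table above is returned through the standard cryptodev info
+ * query. Application-side walk (illustrative only; 'use' is a
+ * hypothetical helper):
+ *
+ *	struct rte_cryptodev_info info;
+ *	rte_cryptodev_info_get(dev_id, &info);
+ *	for (i = 0; info.capabilities[i].op !=
+ *			RTE_CRYPTO_OP_TYPE_UNDEFINED; i++)
+ *		use(&info.capabilities[i]);
+ */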
+#endif /* _RTE_DPAA2_SEC_PMD_PRIVATE_H_ */
-- 
2.9.3
