From: <akhil.goyal@nxp.com>
To: <dev@dpdk.org>
Cc: <declan.doherty@intel.com>, <pablo.de.lara.guarch@intel.com>,
	<hemant.agrawal@nxp.com>, <john.mcnamara@intel.com>,
	<nhorman@tuxdriver.com>, <thomas.monjalon@6wind.com>,
	Akhil Goyal <akhil.goyal@nxp.com>
Subject: [PATCH v7 08/13] crypto/dpaa2_sec: add crypto operation support
Date: Mon, 10 Apr 2017 18:01:03 +0530
Message-ID: <20170410123108.26305-9-akhil.goyal@nxp.com>
In-Reply-To: <20170410123108.26305-1-akhil.goyal@nxp.com>

From: Akhil Goyal <akhil.goyal@nxp.com>

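Add crypto operation support to the dpaa2_sec PMD: enqueue and dequeue
burst operations built on QBMAN frame descriptors, queue pair setup and
release, and session configuration for cipher-only, auth-only and
chained cipher/auth operations.
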
Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 1210 +++++++++++++++++++++++++++
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  143 ++++
 2 files changed, 1353 insertions(+)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index bb56af1..29346df 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -48,17 +48,1216 @@
 #include <fslmc_vfio.h>
 #include <dpaa2_hw_pvt.h>
 #include <dpaa2_hw_dpio.h>
+#include <dpaa2_hw_mempool.h>
 #include <fsl_dpseci.h>
 #include <fsl_mc_sys.h>
 
 #include "dpaa2_sec_priv.h"
 #include "dpaa2_sec_logs.h"
 
+/* RTA header files */
+#include <hw/desc/ipsec.h>
+#include <hw/desc/algo.h>
+
+/* A minimum job descriptor consists of a one-word job descriptor HEADER
+ * and a pointer to the shared descriptor.
+ */
+#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
 #define FSL_VENDOR_ID           0x1957
 #define FSL_DEVICE_ID           0x410
 #define FSL_SUBSYSTEM_SEC       1
 #define FSL_MC_DPSECI_DEVID     3
 
+#define NO_PREFETCH 0
+#define TDES_CBC_IV_LEN 8
+#define AES_CBC_IV_LEN 16
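+/* The RTA assembler emits opcodes specific to a SEC hardware era;
+ * the DPAA2 platforms targeted here implement SEC Era 8.
+ */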
+enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
+
+static inline void
+print_fd(const struct qbman_fd *fd)
+{
+	printf("addr_lo:          %u\n", fd->simple.addr_lo);
+	printf("addr_hi:          %u\n", fd->simple.addr_hi);
+	printf("len:              %u\n", fd->simple.len);
+	printf("bpid:             %u\n", DPAA2_GET_FD_BPID(fd));
+	printf("fi_bpid_off:      %u\n", fd->simple.bpid_offset);
+	printf("frc:              %u\n", fd->simple.frc);
+	printf("ctrl:             %u\n", fd->simple.ctrl);
+	printf("flc_lo:           %u\n", fd->simple.flc_lo);
+	printf("flc_hi:           %u\n\n", fd->simple.flc_hi);
+}
+
+static inline void
+print_fle(const struct qbman_fle *fle)
+{
+	printf("addr_lo:          %u\n", fle->addr_lo);
+	printf("addr_hi:          %u\n", fle->addr_hi);
+	printf("len:              %u\n", fle->length);
+	printf("fi_bpid_off:      %u\n", fle->fin_bpid_offset);
+	printf("frc:              %u\n", fle->frc);
+}
+
+static inline int
+build_authenc_fd(dpaa2_sec_session *sess,
+		 struct rte_crypto_op *op,
+		 struct qbman_fd *fd, uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sym_op->auth.data.length -
+				sym_op->cipher.data.length;
+	int icv_len = sym_op->auth.digest.length;
+	uint8_t *old_icv;
+	uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* TODO: we are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored.
+	 * So while retrieving we can go back one FLE from the FD-ADDR
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+	/* TODO: use a mempool here to avoid the malloc */
+	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		return -1;
+	}
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	fle = fle + 1;
+	sge = fle + 2;
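+	/* Layout of the allocation: the entry behind 'fle' holds the op
+	 * back-pointer, 'fle' and 'fle + 1' are the output and input
+	 * frame-list entries, 'sge' onwards are the scatter/gather
+	 * entries, and the tail is scratch space for the old ICV on
+	 * the decrypt path.
+	 */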
+	if (likely(bpid < MAX_BPID)) {
+		DPAA2_SET_FD_BPID(fd, bpid);
+		DPAA2_SET_FLE_BPID(fle, bpid);
+		DPAA2_SET_FLE_BPID(fle + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge, bpid);
+		DPAA2_SET_FLE_BPID(sge + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge + 2, bpid);
+		DPAA2_SET_FLE_BPID(sge + 3, bpid);
+	} else {
+		DPAA2_SET_FD_IVP(fd);
+		DPAA2_SET_FLE_IVP(fle);
+		DPAA2_SET_FLE_IVP((fle + 1));
+		DPAA2_SET_FLE_IVP(sge);
+		DPAA2_SET_FLE_IVP((sge + 1));
+		DPAA2_SET_FLE_IVP((sge + 2));
+		DPAA2_SET_FLE_IVP((sge + 3));
+	}
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+		   sym_op->auth.data.offset,
+		   sym_op->auth.data.length,
+		   sym_op->auth.digest.length,
+		   sym_op->cipher.data.offset,
+		   sym_op->cipher.data.length,
+		   sym_op->cipher.iv.length,
+		   sym_op->m_src->data_off);
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+	fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->cipher.data.length + icv_len) :
+			sym_op->cipher.data.length;
+
+	DPAA2_SET_FLE_SG_EXT(fle);
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+				sym_op->m_src->data_off);
+	sge->length = sym_op->cipher.data.length;
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+		sge->length = sym_op->auth.digest.length;
+		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+					sym_op->cipher.iv.length));
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+	fle++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(fle);
+	DPAA2_SET_FLE_FIN(fle);
+	fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->auth.data.length + sym_op->cipher.iv.length) :
+			(sym_op->auth.data.length + sym_op->cipher.iv.length +
+			 sym_op->auth.digest.length);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
+	sge->length = sym_op->cipher.iv.length;
+	sge++;
+
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+				sym_op->m_src->data_off);
+	sge->length = sym_op->auth.data.length;
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,	sym_op->auth.digest.data,
+		       sym_op->auth.digest.length);
+		memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = sym_op->auth.digest.length;
+		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+				 sym_op->auth.digest.length +
+				 sym_op->cipher.iv.length));
+	}
+	DPAA2_SET_FLE_FIN(sge);
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+	return 0;
+}
+
+static inline int
+build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+	      struct qbman_fd *fd, uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct qbman_fle *fle, *sge;
+	uint32_t mem_len = (sess->dir == DIR_ENC) ?
+			   (3 * sizeof(struct qbman_fle)) :
+			   (5 * sizeof(struct qbman_fle) +
+			    sym_op->auth.digest.length);
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+	uint8_t *old_digest;
+
+	PMD_INIT_FUNC_TRACE();
+
+	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
+		return -1;
+	}
+	/* TODO: we are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored.
+	 * So while retrieving we can go back one FLE from the FD-ADDR
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	fle = fle + 1;
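+	/* The entry behind 'fle' holds the op back-pointer; 'fle' is the
+	 * output frame-list entry (the digest) and 'fle + 1' the input
+	 * entry (the data to authenticate).
+	 */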
+
+	if (likely(bpid < MAX_BPID)) {
+		DPAA2_SET_FD_BPID(fd, bpid);
+		DPAA2_SET_FLE_BPID(fle, bpid);
+		DPAA2_SET_FLE_BPID(fle + 1, bpid);
+	} else {
+		DPAA2_SET_FD_IVP(fd);
+		DPAA2_SET_FLE_IVP(fle);
+		DPAA2_SET_FLE_IVP((fle + 1));
+	}
+	flc = &priv->flc_desc[DESC_INITFINAL].flc;
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+	fle->length = sym_op->auth.digest.length;
+
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	fle++;
+
+	if (sess->dir == DIR_ENC) {
+		DPAA2_SET_FLE_ADDR(fle,
+				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
+				     sym_op->m_src->data_off);
+		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
+		fle->length = sym_op->auth.data.length;
+	} else {
+		sge = fle + 2;
+		DPAA2_SET_FLE_SG_EXT(fle);
+		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+
+		if (likely(bpid < MAX_BPID)) {
+			DPAA2_SET_FLE_BPID(sge, bpid);
+			DPAA2_SET_FLE_BPID(sge + 1, bpid);
+		} else {
+			DPAA2_SET_FLE_IVP(sge);
+			DPAA2_SET_FLE_IVP((sge + 1));
+		}
+		DPAA2_SET_FLE_ADDR(sge,
+				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+				     sym_op->m_src->data_off);
+
+		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
+				 sym_op->auth.digest.length);
+		sge->length = sym_op->auth.data.length;
+		sge++;
+		old_digest = (uint8_t *)(sge + 1);
+		rte_memcpy(old_digest, sym_op->auth.digest.data,
+			   sym_op->auth.digest.length);
+		memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+		sge->length = sym_op->auth.digest.length;
+		fle->length = sym_op->auth.data.length +
+				sym_op->auth.digest.length;
+		DPAA2_SET_FLE_FIN(sge);
+	}
+	DPAA2_SET_FLE_FIN(fle);
+
+	return 0;
+}
+
+static int
+build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+		struct qbman_fd *fd, uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct qbman_fle *fle, *sge;
+	uint32_t mem_len = (5 * sizeof(struct qbman_fle));
+	struct sec_flow_context *flc;
+	struct ctxt_priv *priv = sess->ctxt;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* TODO: use a mempool here to avoid the malloc */
+	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+	if (!fle) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		return -1;
+	}
+	/* TODO: we are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored.
+	 * So while retrieving we can go back one FLE from the FD-ADDR
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	fle = fle + 1;
+	sge = fle + 2;
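+	/* The entry behind 'fle' holds the op back-pointer; 'fle' and
+	 * 'fle + 1' are the output and input frame-list entries, and
+	 * 'sge' onwards are the scatter/gather entries for the input
+	 * (IV followed by the cipher data).
+	 */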
+
+	if (likely(bpid < MAX_BPID)) {
+		DPAA2_SET_FD_BPID(fd, bpid);
+		DPAA2_SET_FLE_BPID(fle, bpid);
+		DPAA2_SET_FLE_BPID(fle + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge, bpid);
+		DPAA2_SET_FLE_BPID(sge + 1, bpid);
+	} else {
+		DPAA2_SET_FD_IVP(fd);
+		DPAA2_SET_FLE_IVP(fle);
+		DPAA2_SET_FLE_IVP((fle + 1));
+		DPAA2_SET_FLE_IVP(sge);
+		DPAA2_SET_FLE_IVP((sge + 1));
+	}
+
+	flc = &priv->flc_desc[0].flc;
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
+			 sym_op->cipher.iv.length);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
+		   sym_op->cipher.data.offset,
+		   sym_op->cipher.data.length,
+		   sym_op->cipher.iv.length,
+		   sym_op->m_src->data_off);
+
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
+			     sym_op->m_src->data_off);
+
+	fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+
+	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
+		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
+
+	fle++;
+
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+	fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+
+	DPAA2_SET_FLE_SG_EXT(fle);
+
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
+	sge->length = sym_op->cipher.iv.length;
+
+	sge++;
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+			     sym_op->m_src->data_off);
+
+	sge->length = sym_op->cipher.data.length;
+	DPAA2_SET_FLE_FIN(sge);
+	DPAA2_SET_FLE_FIN(fle);
+
+	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+		   (void *)DPAA2_GET_FD_ADDR(fd),
+		   DPAA2_GET_FD_BPID(fd),
+		   bpid_info[bpid].meta_data_size,
+		   DPAA2_GET_FD_OFFSET(fd),
+		   DPAA2_GET_FD_LEN(fd));
+
+	return 0;
+}
+
+static inline int
+build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+	     struct qbman_fd *fd, uint16_t bpid)
+{
+	int ret = -1;
+
+	PMD_INIT_FUNC_TRACE();
+
+	switch (sess->ctxt_type) {
+	case DPAA2_SEC_CIPHER:
+		ret = build_cipher_fd(sess, op, fd, bpid);
+		break;
+	case DPAA2_SEC_AUTH:
+		ret = build_auth_fd(sess, op, fd, bpid);
+		break;
+	case DPAA2_SEC_CIPHER_HASH:
+		ret = build_authenc_fd(sess, op, fd, bpid);
+		break;
+	case DPAA2_SEC_HASH_CIPHER:
+	default:
+		RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+	}
+	return ret;
+}
+
+static uint16_t
+dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+			uint16_t nb_ops)
+{
+	/* Function to transmit the frames to the given device and VQ */
+	uint32_t loop;
+	int32_t ret;
+	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send;
+	struct qbman_eq_desc eqdesc;
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+	struct qbman_swp *swp;
+	uint16_t num_tx = 0;
+	/* TODO: need to support multiple buffer pools */
+	uint16_t bpid;
+	struct rte_mempool *mb_pool;
+	dpaa2_sec_session *sess;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	if (ops[0]->sym->sess_type != RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+		RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
+		return 0;
+	}
+	/* Prepare the enqueue descriptor */
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+	if (!DPAA2_PER_LCORE_SEC_DPIO) {
+		ret = dpaa2_affine_qbman_swp_sec();
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_SEC_PORTAL;
+
+	while (nb_ops) {
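+		/* Burst out up to MAX_TX_RING_SLOTS frames at a time;
+		 * nb_ops >> 3 is non-zero only once at least eight ops
+		 * remain, i.e. enough to fill the ring.
+		 */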
+		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
+
+		for (loop = 0; loop < frames_to_send; loop++) {
+			/* Clear the unused FD fields before sending */
+			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+			sess = (dpaa2_sec_session *)
+				(*ops)->sym->session->_private;
+			mb_pool = (*ops)->sym->m_src->pool;
+			bpid = mempool_to_bpid(mb_pool);
+			ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
+			if (ret) {
+				PMD_DRV_LOG(ERR, "error: Improper packet"
+					    " contents for crypto operation\n");
+				goto skip_tx;
+			}
+			ops++;
+		}
+		loop = 0;
+		while (loop < frames_to_send) {
+			loop += qbman_swp_send_multiple(swp, &eqdesc,
+							&fd_arr[loop],
+							frames_to_send - loop);
+		}
+
+		num_tx += frames_to_send;
+		nb_ops -= frames_to_send;
+	}
+skip_tx:
+	dpaa2_qp->tx_vq.tx_pkts += num_tx;
+	dpaa2_qp->tx_vq.err_pkts += nb_ops;
+	return num_tx;
+}
+
+static inline struct rte_crypto_op *
+sec_fd_to_mbuf(const struct qbman_fd *fd)
+{
+	struct qbman_fle *fle;
+	struct rte_crypto_op *op;
+
+	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+	PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
+		   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+
+	/* TODO: we are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored.
+	 * So while retrieving we can go back one FLE from the FD-ADDR
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+
+	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
+		/* TODO: handle non-inline buffers */
+		RTE_LOG(ERR, PMD, "error: non-inline buffer not yet handled\n");
+		return NULL;
+	}
+	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
+			DPAA2_GET_FLE_ADDR((fle - 1)));
+
+	/* Prefetch the op */
+	rte_prefetch0(op->sym->m_src);
+
+	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
+		   (void *)op->sym->m_src, op->sym->m_src->buf_addr);
+
+	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+		   (void *)DPAA2_GET_FD_ADDR(fd),
+		   DPAA2_GET_FD_BPID(fd),
+		   bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+		   DPAA2_GET_FD_OFFSET(fd),
+		   DPAA2_GET_FD_LEN(fd));
+
+	/* free the fle memory */
+	rte_free(fle - 1);
+
+	return op;
+}
+
+static uint16_t
+dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+			uint16_t nb_ops)
+{
+	/* Function is responsible for receiving frames for a given
+	 * device and VQ.
+	 */
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+	struct qbman_result *dq_storage;
+	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
+	int ret, num_rx = 0;
+	uint8_t is_last = 0, status;
+	struct qbman_swp *swp;
+	const struct qbman_fd *fd;
+	struct qbman_pull_desc pulldesc;
+
+	if (!DPAA2_PER_LCORE_SEC_DPIO) {
+		ret = dpaa2_affine_qbman_swp_sec();
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_SEC_PORTAL;
+	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+
+	qbman_pull_desc_clear(&pulldesc);
+	qbman_pull_desc_set_numframes(&pulldesc,
+				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
+				      DPAA2_DQRR_RING_SIZE : nb_ops);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
+	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
+				    1);
+
+	/* Issue a volatile dequeue command. */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued. "
+				"QBMAN is busy\n");
+			/* Portal was busy, try again */
+			continue;
+		}
+		break;
+	}
+
+	/* Receive packets until the last dequeue entry is found for
+	 * the PULL command issued above.
+	 */
+	while (!is_last) {
+		/* Check if the previously issued command has completed.
+		 * Note that the SWP appears to be shared between the
+		 * Ethernet driver and the SEC driver.
+		 */
+		while (!qbman_check_command_complete(swp, dq_storage))
+			;
+
+		/* Loop until QBMAN updates dq_storage with a new token */
+		while (!qbman_result_has_new_result(swp, dq_storage))
+			;
+		/* Check whether the last pull command has expired, which
+		 * sets the condition for loop termination.
+		 */
+		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+			is_last = 1;
+			/* Check for valid frame. */
+			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+			if ((status & QBMAN_DQ_STAT_VALIDFRAME) == 0) {
+				PMD_RX_LOG(DEBUG, "No frame is delivered");
+				continue;
+			}
+		}
+
+		fd = qbman_result_DQ_fd(dq_storage);
+		ops[num_rx] = sec_fd_to_mbuf(fd);
+
+		if (unlikely(fd->simple.frc)) {
+			/* TODO Parse SEC errors */
+			RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
+					fd->simple.frc);
+			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		} else {
+			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+
+		num_rx++;
+		dq_storage++;
+	} /* End of Packet Rx loop */
+
+	dpaa2_qp->rx_vq.rx_pkts += num_rx;
+
+	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
+	/* Return the total number of packets received to the DPAA2 app */
+	return num_rx;
+}
+/** Release queue pair */
+static int
+dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+	struct dpaa2_sec_qp *qp =
+		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (qp->rx_vq.q_storage) {
+		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
+		rte_free(qp->rx_vq.q_storage);
+	}
+	rte_free(qp);
+
+	dev->data->queue_pairs[queue_pair_id] = NULL;
+
+	return 0;
+}
+
+/** Setup a queue pair */
+static int
+dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+		__rte_unused int socket_id)
+{
+	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+	struct dpaa2_sec_qp *qp;
+	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+	struct dpseci_rx_queue_cfg cfg;
+	int32_t retcode;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* If the qp is already set up, there is nothing more to do. */
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		PMD_DRV_LOG(INFO, "QP already setup");
+		return 0;
+	}
+
+	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
+		    dev, qp_id, qp_conf);
+
+	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+
+	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
+			RTE_CACHE_LINE_SIZE);
+	if (!qp) {
+		RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
+		return -1;
+	}
+
+	qp->rx_vq.dev = dev;
+	qp->tx_vq.dev = dev;
+	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
+		sizeof(struct queue_storage_info_t),
+		RTE_CACHE_LINE_SIZE);
+	if (!qp->rx_vq.q_storage) {
+		RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
+		rte_free(qp);
+		return -1;
+	}
+	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
+
+	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
+		RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
+		rte_free(qp->rx_vq.q_storage);
+		rte_free(qp);
+		return -1;
+	}
+
+	dev->data->queue_pairs[qp_id] = qp;
+
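+	/* Attach the rx queue as the DPSECI rx queue's user context */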
+	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
+	cfg.user_ctx = (uint64_t)(&qp->rx_vq);
+	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+				      qp_id, &cfg);
+	return retcode;
+}
+
+/** Start queue pair */
+static int
+dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
+			   __rte_unused uint16_t queue_pair_id)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	return 0;
+}
+
+/** Stop queue pair */
+static int
+dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
+			  __rte_unused uint16_t queue_pair_id)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	return 0;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the dpaa2_sec session structure */
+static unsigned int
+dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	return sizeof(dpaa2_sec_session);
+}
+
+static void
+dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
+			     void *sess __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+}
+
+static int
+dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
+		      struct rte_crypto_sym_xform *xform,
+		      dpaa2_sec_session *session)
+{
+	struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
+	struct alginfo cipherdata;
+	unsigned int bufsize, i;
+	struct ctxt_priv *priv;
+	struct sec_flow_context *flc;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* For SEC CIPHER only one descriptor is required. */
+	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+			RTE_CACHE_LINE_SIZE);
+	if (priv == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+		return -1;
+	}
+
+	flc = &priv->flc_desc[0].flc;
+
+	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+			RTE_CACHE_LINE_SIZE);
+	if (session->cipher_key.data == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for cipher key");
+		rte_free(priv);
+		return -1;
+	}
+	session->cipher_key.length = xform->cipher.key.length;
+
+	memcpy(session->cipher_key.data, xform->cipher.key.data,
+	       xform->cipher.key.length);
+	cipherdata.key = (uint64_t)session->cipher_key.data;
+	cipherdata.keylen = session->cipher_key.length;
+	cipherdata.key_enc_flags = 0;
+	cipherdata.key_type = RTA_DATA_IMM;
+
+	switch (xform->cipher.algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cipherdata.algtype = OP_ALG_ALGSEL_AES;
+		cipherdata.algmode = OP_ALG_AAI_CBC;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+		ctxt->iv.length = AES_CBC_IV_LEN;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+		cipherdata.algmode = OP_ALG_AAI_CBC;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+		ctxt->iv.length = TDES_CBC_IV_LEN;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+	case RTE_CRYPTO_CIPHER_3DES_CTR:
+	case RTE_CRYPTO_CIPHER_AES_GCM:
+	case RTE_CRYPTO_CIPHER_AES_CCM:
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+	case RTE_CRYPTO_CIPHER_3DES_ECB:
+	case RTE_CRYPTO_CIPHER_AES_XTS:
+	case RTE_CRYPTO_CIPHER_AES_F8:
+	case RTE_CRYPTO_CIPHER_ARC4:
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+	case RTE_CRYPTO_CIPHER_NULL:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+			xform->cipher.algo);
+		goto error_out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+			xform->cipher.algo);
+		goto error_out;
+	}
+	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+				DIR_ENC : DIR_DEC;
+
+	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
+					&cipherdata, NULL, ctxt->iv.length,
+					session->dir);
+	flc->dhr = 0;
+	flc->bpv0 = 0x1;
+	flc->mode_bits = 0x8000;
+
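+	/* Store the shared descriptor length (SDL) in word 1 and point
+	 * the return flow context (RFLC) words at the rx queue of queue
+	 * pair 0 so that SEC results come back to this device.
+	 */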
+	flc->word1_sdl = (uint8_t)bufsize;
+	flc->word2_rflc_31_0 = lower_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	flc->word3_rflc_63_32 = upper_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	session->ctxt = priv;
+
+	for (i = 0; i < bufsize; i++)
+		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+			    i, priv->flc_desc[0].desc[i]);
+
+	return 0;
+
+error_out:
+	rte_free(session->cipher_key.data);
+	rte_free(priv);
+	return -1;
+}
+
+static int
+dpaa2_sec_auth_init(struct rte_cryptodev *dev,
+		    struct rte_crypto_sym_xform *xform,
+		    dpaa2_sec_session *session)
+{
+	struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
+	struct alginfo authdata;
+	unsigned int bufsize;
+	struct ctxt_priv *priv;
+	struct sec_flow_context *flc;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* For SEC AUTH three descriptors are required for various stages */
+	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+			sizeof(struct ctxt_priv) + 3 *
+			sizeof(struct sec_flc_desc),
+			RTE_CACHE_LINE_SIZE);
+	if (priv == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+		return -1;
+	}
+
+	flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+			RTE_CACHE_LINE_SIZE);
+	if (session->auth_key.data == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for auth key");
+		rte_free(priv);
+		return -1;
+	}
+	session->auth_key.length = xform->auth.key.length;
+
+	memcpy(session->auth_key.data, xform->auth.key.data,
+	       xform->auth.key.length);
+	authdata.key = (uint64_t)session->auth_key.data;
+	authdata.keylen = session->auth_key.length;
+	authdata.key_enc_flags = 0;
+	authdata.key_type = RTA_DATA_IMM;
+
+	switch (xform->auth.algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA1;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_MD5;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA256;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA384;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA512;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA224;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+	case RTE_CRYPTO_AUTH_AES_GCM:
+	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+	case RTE_CRYPTO_AUTH_NULL:
+	case RTE_CRYPTO_AUTH_SHA1:
+	case RTE_CRYPTO_AUTH_SHA256:
+	case RTE_CRYPTO_AUTH_SHA512:
+	case RTE_CRYPTO_AUTH_SHA224:
+	case RTE_CRYPTO_AUTH_SHA384:
+	case RTE_CRYPTO_AUTH_MD5:
+	case RTE_CRYPTO_AUTH_AES_CCM:
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+	case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+			xform->auth.algo);
+		goto error_out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+			xform->auth.algo);
+		goto error_out;
+	}
+	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+				DIR_ENC : DIR_DEC;
+
+	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
+				   1, 0, &authdata, !session->dir,
+				   ctxt->trunc_len);
+
+	flc->word1_sdl = (uint8_t)bufsize;
+	flc->word2_rflc_31_0 = lower_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	flc->word3_rflc_63_32 = upper_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	session->ctxt = priv;
+
+	return 0;
+
+error_out:
+	rte_free(session->auth_key.data);
+	rte_free(priv);
+	return -1;
+}
+
+static int
+dpaa2_sec_aead_init(struct rte_cryptodev *dev,
+		    struct rte_crypto_sym_xform *xform,
+		    dpaa2_sec_session *session)
+{
+	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+	struct alginfo authdata, cipherdata;
+	unsigned int bufsize;
+	struct ctxt_priv *priv;
+	struct sec_flow_context *flc;
+	struct rte_crypto_cipher_xform *cipher_xform;
+	struct rte_crypto_auth_xform *auth_xform;
+	int err;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (session->ext_params.aead_ctxt.auth_cipher_text) {
+		cipher_xform = &xform->cipher;
+		auth_xform = &xform->next->auth;
+		session->ctxt_type =
+			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
+	} else {
+		cipher_xform = &xform->next->cipher;
+		auth_xform = &xform->auth;
+		session->ctxt_type =
+			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
+	}
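+	/* The pipeline order depends on both the xform chain order and
+	 * the cipher direction: cipher-then-auth on encrypt and
+	 * auth-then-cipher on decrypt both map to DPAA2_SEC_CIPHER_HASH.
+	 */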
+	/* For SEC AEAD only one descriptor is required */
+	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+			RTE_CACHE_LINE_SIZE);
+	if (priv == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+		return -1;
+	}
+
+	flc = &priv->flc_desc[0].flc;
+
+	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
+					       RTE_CACHE_LINE_SIZE);
+	if (session->cipher_key.data == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for cipher key");
+		rte_free(priv);
+		return -1;
+	}
+	session->cipher_key.length = cipher_xform->key.length;
+	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
+					     RTE_CACHE_LINE_SIZE);
+	if (session->auth_key.data == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for auth key");
+		goto error_out;
+	}
+	session->auth_key.length = auth_xform->key.length;
+	memcpy(session->cipher_key.data, cipher_xform->key.data,
+	       cipher_xform->key.length);
+	memcpy(session->auth_key.data, auth_xform->key.data,
+	       auth_xform->key.length);
+
+	ctxt->trunc_len = auth_xform->digest_length;
+	authdata.key = (uint64_t)session->auth_key.data;
+	authdata.keylen = session->auth_key.length;
+	authdata.key_enc_flags = 0;
+	authdata.key_type = RTA_DATA_IMM;
+
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA1;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_MD5;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA224;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA256;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA384;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		authdata.algtype = OP_ALG_ALGSEL_SHA512;
+		authdata.algmode = OP_ALG_AAI_HMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+	case RTE_CRYPTO_AUTH_AES_GCM:
+	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+	case RTE_CRYPTO_AUTH_NULL:
+	case RTE_CRYPTO_AUTH_SHA1:
+	case RTE_CRYPTO_AUTH_SHA256:
+	case RTE_CRYPTO_AUTH_SHA512:
+	case RTE_CRYPTO_AUTH_SHA224:
+	case RTE_CRYPTO_AUTH_SHA384:
+	case RTE_CRYPTO_AUTH_MD5:
+	case RTE_CRYPTO_AUTH_AES_CCM:
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+	case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+			auth_xform->algo);
+		goto error_out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+			auth_xform->algo);
+		goto error_out;
+	}
+	cipherdata.key = (uint64_t)session->cipher_key.data;
+	cipherdata.keylen = session->cipher_key.length;
+	cipherdata.key_enc_flags = 0;
+	cipherdata.key_type = RTA_DATA_IMM;
+
+	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cipherdata.algtype = OP_ALG_ALGSEL_AES;
+		cipherdata.algmode = OP_ALG_AAI_CBC;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+		ctxt->iv.length = AES_CBC_IV_LEN;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+		cipherdata.algmode = OP_ALG_AAI_CBC;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+		ctxt->iv.length = TDES_CBC_IV_LEN;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_GCM:
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+	case RTE_CRYPTO_CIPHER_NULL:
+	case RTE_CRYPTO_CIPHER_3DES_ECB:
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+	case RTE_CRYPTO_CIPHER_AES_CCM:
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+			cipher_xform->algo);
+		goto error_out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+			cipher_xform->algo);
+		goto error_out;
+	}
+	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+				DIR_ENC : DIR_DEC;
+
+	priv->flc_desc[0].desc[0] = cipherdata.keylen;
+	priv->flc_desc[0].desc[1] = authdata.keylen;
+	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+			MIN_JOB_DESC_SIZE,
+			(unsigned int *)priv->flc_desc[0].desc,
+			&priv->flc_desc[0].desc[2], 2);
+
+	if (err < 0) {
+		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
+		goto error_out;
+	}
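+	/* rta_inline_query() sets one bit per key in desc[2] when that
+	 * key fits inline in the shared descriptor: bit 0 for the cipher
+	 * key, bit 1 for the auth key. Keys that do not fit are
+	 * referenced by pointer instead.
+	 */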
+	if (priv->flc_desc[0].desc[2] & 1) {
+		cipherdata.key_type = RTA_DATA_IMM;
+	} else {
+		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+		cipherdata.key_type = RTA_DATA_PTR;
+	}
+	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
+		authdata.key_type = RTA_DATA_IMM;
+	} else {
+		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
+		authdata.key_type = RTA_DATA_PTR;
+	}
+	priv->flc_desc[0].desc[0] = 0;
+	priv->flc_desc[0].desc[1] = 0;
+	priv->flc_desc[0].desc[2] = 0;
+
+	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
+		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
+					      0, &cipherdata, &authdata,
+					      ctxt->iv.length,
+					      ctxt->auth_only_len,
+					      ctxt->trunc_len,
+					      session->dir);
+	} else {
+		RTE_LOG(ERR, PMD, "Hash before cipher not supported");
+		goto error_out;
+	}
+
+	flc->word1_sdl = (uint8_t)bufsize;
+	flc->word2_rflc_31_0 = lower_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	flc->word3_rflc_63_32 = upper_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	session->ctxt = priv;
+
+	return 0;
+
+error_out:
+	rte_free(session->cipher_key.data);
+	rte_free(session->auth_key.data);
+	rte_free(priv);
+	return -1;
+}
+
+static void *
+dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+			    struct rte_crypto_sym_xform *xform,	void *sess)
+{
+	dpaa2_sec_session *session = sess;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (unlikely(sess == NULL)) {
+		RTE_LOG(ERR, PMD, "invalid session struct");
+		return NULL;
+	}
+	/* Cipher Only */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+		session->ctxt_type = DPAA2_SEC_CIPHER;
+		dpaa2_sec_cipher_init(dev, xform, session);
+
+	/* Authentication Only */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		   xform->next == NULL) {
+		session->ctxt_type = DPAA2_SEC_AUTH;
+		dpaa2_sec_auth_init(dev, xform, session);
+
+	/* Cipher then Authenticate */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		session->ext_params.aead_ctxt.auth_cipher_text = true;
+		dpaa2_sec_aead_init(dev, xform, session);
+
+	/* Authenticate then Cipher */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		session->ext_params.aead_ctxt.auth_cipher_text = false;
+		dpaa2_sec_aead_init(dev, xform, session);
+	} else {
+		RTE_LOG(ERR, PMD, "Invalid crypto type");
+		return NULL;
+	}
+
+	return session;
+}
+
+/** Clear the session memory so that it doesn't leave key material behind */
+static void
+dpaa2_sec_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	if (sess)
+		memset(sess, 0, sizeof(dpaa2_sec_session));
+}
 
 static int
 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
@@ -196,6 +1395,15 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.dev_stop	      = dpaa2_sec_dev_stop,
 	.dev_close	      = dpaa2_sec_dev_close,
 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
+	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
+	.queue_pair_release   = dpaa2_sec_queue_pair_release,
+	.queue_pair_start     = dpaa2_sec_queue_pair_start,
+	.queue_pair_stop      = dpaa2_sec_queue_pair_stop,
+	.queue_pair_count     = dpaa2_sec_queue_pair_count,
+	.session_get_size     = dpaa2_sec_session_get_size,
+	.session_initialize   = dpaa2_sec_session_initialize,
+	.session_configure    = dpaa2_sec_session_configure,
+	.session_clear        = dpaa2_sec_session_clear,
 };
 
 static int
@@ -234,6 +1442,8 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 	cryptodev->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
 	cryptodev->dev_ops = &crypto_ops;
 
+	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
+	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 6ecfb01..f5c6169 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -34,6 +34,8 @@
 #ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
 #define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
 
+#define MAX_QUEUES		64
+#define MAX_DESC_SIZE		64
 /** private data structure for each DPAA2_SEC device */
 struct dpaa2_sec_dev_private {
 	void *mc_portal; /**< MC Portal for configuring this device */
@@ -52,6 +54,147 @@ struct dpaa2_sec_qp {
 	struct dpaa2_queue tx_vq;
 };
 
+enum shr_desc_type {
+	DESC_UPDATE,
+	DESC_FINAL,
+	DESC_INITFINAL,
+};
+
+#define DIR_ENC                 1
+#define DIR_DEC                 0
+
+/* SEC Flow Context Descriptor */
+struct sec_flow_context {
+	/* word 0 */
+	uint16_t word0_sdid;		/* 11-0  SDID */
+	uint16_t word0_res;		/* 31-12 reserved */
+
+	/* word 1 */
+	uint8_t word1_sdl;		/* 5-0 SDL */
+					/* 7-6 reserved */
+
+	uint8_t word1_bits_15_8;        /* 11-8 CRID */
+					/* 14-12 reserved */
+					/* 15 CRJD */
+
+	uint8_t word1_bits23_16;	/* 16  EWS */
+					/* 17 DAC */
+					/* 18,19,20 ? */
+					/* 23-21 reserved */
+
+	uint8_t word1_bits31_24;	/* 24 RSC */
+					/* 25 RBMT */
+					/* 31-26 reserved */
+
+	/* word 2  RFLC[31-0] */
+	uint32_t word2_rflc_31_0;
+
+	/* word 3  RFLC[63-32] */
+	uint32_t word3_rflc_63_32;
+
+	/* word 4 */
+	uint16_t word4_iicid;		/* 15-0  IICID */
+	uint16_t word4_oicid;		/* 31-16 OICID */
+
+	/* word 5 */
+	uint32_t word5_ofqid:24;		/* 23-0 OFQID */
+	uint32_t word5_31_24:8;
+					/* 24 OSC */
+					/* 25 OBMT */
+					/* 29-26 reserved */
+					/* 31-30 ICR */
+
+	/* word 6 */
+	uint32_t word6_oflc_31_0;
+
+	/* word 7 */
+	uint32_t word7_oflc_63_32;
+
+	/* Words 8-15: storage profiles */
+	uint16_t dl;			/**< DataLength (correction) */
+	uint16_t reserved;		/**< reserved */
+	uint16_t dhr;			/**< DataHeadRoom (correction) */
+	uint16_t mode_bits;		/**< mode bits */
+	uint16_t bpv0;			/**< buffer pool0 valid */
+	uint16_t bpid0;			/**< buffer pool0 id */
+	uint16_t bpv1;			/**< buffer pool1 valid */
+	uint16_t bpid1;			/**< buffer pool1 id */
+	uint64_t word_12_15[2];		/**< words 12-15 are reserved */
+};
+
+struct sec_flc_desc {
+	struct sec_flow_context flc;
+	uint32_t desc[MAX_DESC_SIZE];
+};
+
+struct ctxt_priv {
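+	/* Flexible array: each session allocates as many flc descriptors
+	 * as it needs (one for cipher/AEAD sessions, three for auth).
+	 */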
+	struct sec_flc_desc flc_desc[0];
+};
+
+enum dpaa2_sec_op_type {
+	DPAA2_SEC_NONE,  /*!< No cipher operations */
+	DPAA2_SEC_CIPHER,/*!< Cipher operations */
+	DPAA2_SEC_AUTH,  /*!< Authentication operations */
+	DPAA2_SEC_CIPHER_HASH,  /*!< Cipher followed by hash: authenticated
+				 * encryption with associated data
+				 */
+	DPAA2_SEC_HASH_CIPHER,  /*!< Hash followed by cipher: authentication
+				 * then encryption
+				 */
+	DPAA2_SEC_IPSEC, /*!< IPSEC protocol operations */
+	DPAA2_SEC_PDCP,  /*!< PDCP protocol operations */
+	DPAA2_SEC_PKC,   /*!< Public key cryptographic operations */
+	DPAA2_SEC_MAX
+};
+
+struct dpaa2_sec_cipher_ctxt {
+	struct {
+		uint8_t *data;
+		uint16_t length;
+	} iv;	/**< Initialisation vector parameters */
+	uint8_t *init_counter;  /*!< Set initial counter for CTR mode */
+};
+
+struct dpaa2_sec_auth_ctxt {
+	uint8_t trunc_len;              /*!< Length for output ICV, should
+					 * be 0 if no truncation required
+					 */
+};
+
+struct dpaa2_sec_aead_ctxt {
+	struct {
+		uint8_t *data;
+		uint16_t length;
+	} iv;	/**< Initialisation vector parameters */
+	uint16_t auth_only_len; /*!< Length of data for Auth only */
+	uint8_t auth_cipher_text;       /**< Authenticate/cipher ordering */
+	uint8_t trunc_len;              /*!< Length for output ICV, should
+					 * be 0 if no truncation required
+					 */
+};
+
+typedef struct dpaa2_sec_session_entry {
+	void *ctxt;
+	uint8_t ctxt_type;
+	uint8_t dir;         /*!< Operation Direction */
+	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+	struct {
+		uint8_t *data;	/**< pointer to key data */
+		size_t length;	/**< key length in bytes */
+	} cipher_key;
+	struct {
+		uint8_t *data;	/**< pointer to key data */
+		size_t length;	/**< key length in bytes */
+	} auth_key;
+	uint8_t status;
+	union {
+		struct dpaa2_sec_cipher_ctxt cipher_ctxt;
+		struct dpaa2_sec_auth_ctxt auth_ctxt;
+		struct dpaa2_sec_aead_ctxt aead_ctxt;
+	} ext_params;
+} dpaa2_sec_session;
+
 static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 	{	/* MD5 HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-- 
2.9.3
