* [PATCH 0/5] crypto/dpaa2_sec optimization and feature update
@ 2017-06-29 20:48 akhil.goyal
  2017-06-29 20:49 ` [PATCH 1/5] crypto/dpaa2_sec: add per device mempool to store frame list entries akhil.goyal
                   ` (5 more replies)
  0 siblings, 6 replies; 21+ messages in thread
From: akhil.goyal @ 2017-06-29 20:48 UTC (permalink / raw)
  To: dev; +Cc: hemant.agrawal, pablo.de.lara.guarch, declan.doherty, Akhil Goyal

From: Akhil Goyal <akhil.goyal@nxp.com>

This patchset updates the dpaa2_sec crypto driver with the following:
- optimize memory allocation in the data path
- add support for additional AES algorithms (AES-GCM and AES-CTR)
- update the test cases in test_cryptodev for all supported algorithms
- update the documentation with the supported algorithms

The patches are based on dpdk-crypto-next and are rebased on top of the
latest crypto restructuring changes by Pablo.
http://dpdk.org/ml/archives/dev/2017-June/069372.html


Akhil Goyal (5):
  crypto/dpaa2_sec: add per device mempool to store frame list entries
  crypto/dpaa2_sec: add descriptor support for gcm and ctr
  crypto/dpaa2_sec: add support for AES-GCM and CTR
  test/test: add test cases for gcm and ctr in dpaa2_sec test suite
  doc: update documentation for dpaa2_sec supported algos

 doc/guides/cryptodevs/dpaa2_sec.rst          |   9 +-
 doc/guides/cryptodevs/features/dpaa2_sec.ini |   6 +
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h      |   7 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 384 ++++++++++++++++++++++++---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    | 100 ++++---
 drivers/crypto/dpaa2_sec/hw/desc/algo.h      | 226 +++++++++++++++-
 drivers/crypto/dpaa2_sec/hw/desc/ipsec.h     |  19 +-
 test/test/test_cryptodev.c                   |  94 ++++++-
 test/test/test_cryptodev_aes_test_vectors.h  |  78 ++++--
 test/test/test_cryptodev_blockcipher.c       |   1 +
 test/test/test_cryptodev_des_test_vectors.h  |  24 +-
 test/test/test_cryptodev_hash_test_vectors.h |  36 ++-
 12 files changed, 846 insertions(+), 138 deletions(-)

-- 
2.9.3


* [PATCH 1/5] crypto/dpaa2_sec: add per device mempool to store frame list entries
  2017-06-29 20:48 [PATCH 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
@ 2017-06-29 20:49 ` akhil.goyal
  2017-06-29 20:49 ` [PATCH 3/5] crypto/dpaa2_sec: add support for AES-GCM and CTR akhil.goyal
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 21+ messages in thread
From: akhil.goyal @ 2017-06-29 20:49 UTC (permalink / raw)
  To: dev; +Cc: hemant.agrawal, pablo.de.lara.guarch, declan.doherty, Akhil Goyal

From: Akhil Goyal <akhil.goyal@nxp.com>

rte_malloc allocates from a memory area that is common to all cores,
so every FLE allocation in the data path contends on the same heap.

The rte_malloc calls are now replaced by a per-device mempool used to
allocate space for the FLEs (frame list entries). This removes the
contention and improves performance.
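
For illustration only, a minimal sketch of the new allocation pattern:
the constants, the pool name format and the get/memset sequence are
taken from the diff below, while the helper function names are
hypothetical.

#include <stdio.h>
#include <string.h>
#include <rte_memory.h>
#include <rte_mempool.h>

#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512

/* Created once per device at probe/init time. */
static struct rte_mempool *
create_fle_pool(int dev_id)
{
	char name[20];

	snprintf(name, sizeof(name), "fle_pool_%d", dev_id);
	return rte_mempool_create(name, FLE_POOL_NUM_BUFS,
				  FLE_POOL_BUF_SIZE, FLE_POOL_CACHE_SIZE, 0,
				  NULL, NULL, NULL, NULL,
				  SOCKET_ID_ANY, 0);
}

/* Data-path allocation: a get from the per-device pool (served from
 * per-lcore caches) replaces the previous rte_zmalloc() call.
 * On completion the buffer goes back with rte_mempool_put(). */
static int
alloc_fle(struct rte_mempool *fle_pool, void **fle)
{
	if (rte_mempool_get(fle_pool, fle))
		return -1;			/* pool exhausted */
	memset(*fle, 0, FLE_POOL_BUF_SIZE);	/* pool buffers are not zeroed */
	return 0;
}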

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h     |  7 +++
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 79 ++++++++++++++++++++++-------
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  2 +
 3 files changed, 70 insertions(+), 18 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 429eaee..16cadf5 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -182,6 +182,13 @@ struct qbman_fle {
 	fle->addr_lo = lower_32_bits((uint64_t)addr);     \
 	fle->addr_hi = upper_32_bits((uint64_t)addr);	  \
 } while (0)
+#define DPAA2_GET_FLE_CTXT(fle)					\
+	(uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \
+			(fle)->reserved[0])
+#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
+	fle->reserved[0] = lower_32_bits((uint64_t)addr);     \
+	fle->reserved[1] = upper_32_bits((uint64_t)addr);	  \
+} while (0)
 #define DPAA2_SET_FLE_OFFSET(fle, offset) \
 	((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
 #define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid)
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 3620751..dbdaf46 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -71,6 +71,13 @@
 #define NO_PREFETCH 0
 #define TDES_CBC_IV_LEN 8
 #define AES_CBC_IV_LEN 16
+#define AES_CTR_IV_LEN 16
+#define AES_GCM_IV_LEN 12
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS	32000
+#define FLE_POOL_BUF_SIZE	256
+#define FLE_POOL_CACHE_SIZE	512
+
 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
 
 static inline int
@@ -84,9 +91,8 @@ build_authenc_fd(dpaa2_sec_session *sess,
 	struct sec_flow_context *flc;
 	uint32_t auth_only_len = sym_op->auth.data.length -
 				sym_op->cipher.data.length;
-	int icv_len = sess->digest_length;
+	int icv_len = sess->digest_length, retval;
 	uint8_t *old_icv;
-	uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
 			sess->iv.offset);
 
@@ -98,12 +104,14 @@ build_authenc_fd(dpaa2_sec_session *sess,
 	 * to get the MBUF Addr from the previous FLE.
 	 * We can have a better approach to use the inline Mbuf
 	 */
-	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
-	if (!fle) {
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
 		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
 		return -1;
 	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
 	fle = fle + 1;
 	sge = fle + 2;
 	if (likely(bpid < MAX_BPID)) {
@@ -214,21 +222,19 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 {
 	struct rte_crypto_sym_op *sym_op = op->sym;
 	struct qbman_fle *fle, *sge;
-	uint32_t mem_len = (sess->dir == DIR_ENC) ?
-			   (3 * sizeof(struct qbman_fle)) :
-			   (5 * sizeof(struct qbman_fle) +
-			    sess->digest_length);
 	struct sec_flow_context *flc;
 	struct ctxt_priv *priv = sess->ctxt;
 	uint8_t *old_digest;
+	int retval;
 
 	PMD_INIT_FUNC_TRACE();
 
-	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
-	if (!fle) {
-		RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
 		return -1;
 	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
 	/* TODO we are using the first FLE entry to store Mbuf.
 	 * Currently we donot know which FLE has the mbuf stored.
 	 * So while retreiving we can go back 1 FLE from the FD -ADDR
@@ -236,6 +242,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	 * We can have a better approach to use the inline Mbuf
 	 */
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
 	fle = fle + 1;
 
 	if (likely(bpid < MAX_BPID)) {
@@ -306,7 +313,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 {
 	struct rte_crypto_sym_op *sym_op = op->sym;
 	struct qbman_fle *fle, *sge;
-	uint32_t mem_len = (5 * sizeof(struct qbman_fle));
+	int retval;
 	struct sec_flow_context *flc;
 	struct ctxt_priv *priv = sess->ctxt;
 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
@@ -314,12 +321,12 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 	PMD_INIT_FUNC_TRACE();
 
-	/* todo - we can use some mempool to avoid malloc here */
-	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
-	if (!fle) {
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
 		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
 		return -1;
 	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
 	/* TODO we are using the first FLE entry to store Mbuf.
 	 * Currently we donot know which FLE has the mbuf stored.
 	 * So while retreiving we can go back 1 FLE from the FD -ADDR
@@ -327,6 +334,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	 * We can have a better approach to use the inline Mbuf
 	 */
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
 	fle = fle + 1;
 	sge = fle + 2;
 
@@ -499,6 +507,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 {
 	struct qbman_fle *fle;
 	struct rte_crypto_op *op;
+	struct ctxt_priv *priv;
 
 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
 
@@ -534,7 +543,8 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 		   DPAA2_GET_FD_LEN(fd));
 
 	/* free the fle memory */
-	rte_free(fle - 1);
+	priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+	rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
 
 	return op;
 }
@@ -764,6 +774,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		      dpaa2_sec_session *session)
 {
 	struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo cipherdata;
 	int bufsize, i;
 	struct ctxt_priv *priv;
@@ -780,6 +791,8 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		return -1;
 	}
 
+	priv->fle_pool = dev_priv->fle_pool;
+
 	flc = &priv->flc_desc[0].flc;
 
 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
@@ -875,8 +888,9 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 		    dpaa2_sec_session *session)
 {
 	struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata;
-	unsigned int bufsize;
+	unsigned int bufsize, i;
 	struct ctxt_priv *priv;
 	struct sec_flow_context *flc;
 
@@ -892,6 +906,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 		return -1;
 	}
 
+	priv->fle_pool = dev_priv->fle_pool;
 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
 
 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
@@ -980,6 +995,9 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 			(uint64_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	session->ctxt = priv;
+	for (i = 0; i < bufsize; i++)
+		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+			    i, priv->flc_desc[0].desc[i]);
 
 	return 0;
 
@@ -995,8 +1013,9 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 		    dpaa2_sec_session *session)
 {
 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata, cipherdata;
-	unsigned int bufsize;
+	unsigned int bufsize, i;
 	struct ctxt_priv *priv;
 	struct sec_flow_context *flc;
 	struct rte_crypto_cipher_xform *cipher_xform;
@@ -1032,6 +1051,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 		return -1;
 	}
 
+	priv->fle_pool = dev_priv->fle_pool;
 	flc = &priv->flc_desc[0].flc;
 
 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
@@ -1199,6 +1219,10 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 			(uint64_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	session->ctxt = priv;
+	for (i = 0; i < bufsize; i++)
+		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
 
 	return 0;
 
@@ -1496,6 +1520,10 @@ static struct rte_cryptodev_ops crypto_ops = {
 static int
 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
 {
+	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+	rte_mempool_free(internals->fle_pool);
+
 	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
 		     dev->data->name, rte_socket_id());
 
@@ -1512,6 +1540,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 	uint16_t token;
 	struct dpseci_attr attr;
 	int retcode, hw_id;
+	char str[20];
 
 	PMD_INIT_FUNC_TRACE();
 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
@@ -1572,6 +1601,20 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 	internals->hw = dpseci;
 	internals->token = token;
 
+	sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
+	internals->fle_pool = rte_mempool_create((const char *)str,
+			FLE_POOL_NUM_BUFS,
+			FLE_POOL_BUF_SIZE,
+			FLE_POOL_CACHE_SIZE, 0,
+			NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
+	if (!internals->fle_pool) {
+		RTE_LOG(ERR, PMD, "%s create failed", str);
+		goto init_error;
+	} else
+		RTE_LOG(INFO, PMD, "%s created: %p\n", str,
+				internals->fle_pool);
+
 	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
 	return 0;
 
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index eda2eec..b4dfe24 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -40,6 +40,7 @@
 struct dpaa2_sec_dev_private {
 	void *mc_portal; /**< MC Portal for configuring this device */
 	void *hw; /**< Hardware handle for this device.Used by NADK framework */
+	struct rte_mempool *fle_pool; /* per device memory pool for FLE */
 	int32_t hw_id; /**< An unique ID of this device instance */
 	int32_t vfio_fd; /**< File descriptor received via VFIO */
 	uint16_t token; /**< Token required by DPxxx objects */
@@ -128,6 +129,7 @@ struct sec_flc_desc {
 };
 
 struct ctxt_priv {
+	struct rte_mempool *fle_pool; /* per device memory pool for FLE */
 	struct sec_flc_desc flc_desc[0];
 };
 
-- 
2.9.3


* [PATCH 3/5] crypto/dpaa2_sec: add support for AES-GCM and CTR
  2017-06-29 20:48 [PATCH 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
  2017-06-29 20:49 ` [PATCH 1/5] crypto/dpaa2_sec: add per device mempool to store frame list entries akhil.goyal
@ 2017-06-29 20:49 ` akhil.goyal
  2017-06-29 20:49 ` [PATCH 4/5] test/test: add test cases for gcm and ctr in dpaa2_sec test suite akhil.goyal
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 21+ messages in thread
From: akhil.goyal @ 2017-06-29 20:49 UTC (permalink / raw)
  To: dev; +Cc: hemant.agrawal, pablo.de.lara.guarch, declan.doherty, Akhil Goyal

From: Akhil Goyal <akhil.goyal@nxp.com>

AES-GCM support is added using the AEAD type of crypto operations.
Support for AES-CTR is also added.
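
For context, a hypothetical application-side AEAD transform for AES-GCM,
using the same fields that dpaa2_sec_aead_init() consumes in the diff
below. Key data, IV offset and lengths are placeholders, and the field
names follow the in-flux 17.08 AEAD API as used by this patch; this is a
sketch, not a complete session setup.

#include <stdint.h>
#include <rte_crypto.h>

/* Convention: the IV is placed right after the crypto op in the op mempool. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static uint8_t gcm_key[16];	/* placeholder 128-bit key */

static struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.next = NULL,				/* single AEAD xform, no chaining */
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,	/* maps to DIR_ENC */
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = gcm_key, .length = sizeof(gcm_key) },
		.iv = { .offset = IV_OFFSET, .length = 12 },	/* 12-byte GCM IV */
		.digest_length = 16,
		.add_auth_data_length = 16,	/* AAD length (auth_only_len) */
	},
};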

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 315 ++++++++++++++++++++++++++--
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  98 ++++++---
 2 files changed, 359 insertions(+), 54 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index dbdaf46..997956f 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -69,10 +69,6 @@
 #define FSL_MC_DPSECI_DEVID     3
 
 #define NO_PREFETCH 0
-#define TDES_CBC_IV_LEN 8
-#define AES_CBC_IV_LEN 16
-#define AES_CTR_IV_LEN 16
-#define AES_GCM_IV_LEN 12
 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
 #define FLE_POOL_NUM_BUFS	32000
 #define FLE_POOL_BUF_SIZE	256
@@ -81,6 +77,148 @@
 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
 
 static inline int
+build_authenc_gcm_fd(dpaa2_sec_session *sess,
+		     struct rte_crypto_op *op,
+		     struct qbman_fd *fd, uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+	int icv_len = sess->digest_length, retval;
+	uint8_t *old_icv;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			sess->iv.offset);
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
+	   Currently we donot know which FLE has the mbuf stored.
+	   So while retreiving we can go back 1 FLE from the FD -ADDR
+	   to get the MBUF Addr from the previous FLE.
+	   We can have a better approach to use the inline Mbuf*/
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		return -1;
+	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	fle = fle + 1;
+	sge = fle + 2;
+	if (likely(bpid < MAX_BPID)) {
+		DPAA2_SET_FD_BPID(fd, bpid);
+		DPAA2_SET_FLE_BPID(fle, bpid);
+		DPAA2_SET_FLE_BPID(fle + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge, bpid);
+		DPAA2_SET_FLE_BPID(sge + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge + 2, bpid);
+		DPAA2_SET_FLE_BPID(sge + 3, bpid);
+	} else {
+		DPAA2_SET_FD_IVP(fd);
+		DPAA2_SET_FLE_IVP(fle);
+		DPAA2_SET_FLE_IVP((fle + 1));
+		DPAA2_SET_FLE_IVP(sge);
+		DPAA2_SET_FLE_IVP((sge + 1));
+		DPAA2_SET_FLE_IVP((sge + 2));
+		DPAA2_SET_FLE_IVP((sge + 3));
+	}
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+		   "iv-len=%d data_off: 0x%x\n",
+		   sym_op->aead.data.offset,
+		   sym_op->aead.data.length,
+		   sym_op->aead.digest.length,
+		   sess->iv.length,
+		   sym_op->m_src->data_off);
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+	fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->aead.data.length + icv_len + auth_only_len) :
+			sym_op->aead.data.length + auth_only_len;
+
+	DPAA2_SET_FLE_SG_EXT(fle);
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+				sym_op->m_src->data_off - auth_only_len);
+	sge->length = sym_op->aead.data.length + auth_only_len;
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+		sge->length = sess->digest_length;
+		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+					sess->iv.length + auth_only_len));
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+	fle++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(fle);
+	DPAA2_SET_FLE_FIN(fle);
+	fle->length = (sess->dir == DIR_ENC) ?
+		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
+		 sess->digest_length);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+	sge->length = sess->iv.length;
+	sge++;
+	if (auth_only_len) {
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+		sge->length = auth_only_len;
+		DPAA2_SET_FLE_BPID(sge, bpid);
+		sge++;
+	}
+
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+				sym_op->m_src->data_off);
+	sge->length = sym_op->aead.data.length;
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,	sym_op->aead.digest.data,
+		       sess->digest_length);
+		memset(sym_op->aead.digest.data, 0, sess->digest_length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = sess->digest_length;
+		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+				 sess->digest_length +
+				 sess->iv.length +
+				 auth_only_len));
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+
+	return 0;
+}
+
+static inline int
 build_authenc_fd(dpaa2_sec_session *sess,
 		 struct rte_crypto_op *op,
 		 struct qbman_fd *fd, uint16_t bpid)
@@ -418,6 +556,9 @@ build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	case DPAA2_SEC_AUTH:
 		ret = build_auth_fd(sess, op, fd, bpid);
 		break;
+	case DPAA2_SEC_AEAD:
+		ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+		break;
 	case DPAA2_SEC_CIPHER_HASH:
 		ret = build_authenc_fd(sess, op, fd, bpid);
 		break;
@@ -773,7 +914,6 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		      struct rte_crypto_sym_xform *xform,
 		      dpaa2_sec_session *session)
 {
-	struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo cipherdata;
 	int bufsize, i;
@@ -820,15 +960,17 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
-		ctxt->iv.length = AES_CBC_IV_LEN;
 		break;
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
-		ctxt->iv.length = TDES_CBC_IV_LEN;
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cipherdata.algtype = OP_ALG_ALGSEL_AES;
+		cipherdata.algmode = OP_ALG_AAI_CTR;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+		break;
 	case RTE_CRYPTO_CIPHER_3DES_CTR:
 	case RTE_CRYPTO_CIPHER_AES_ECB:
 	case RTE_CRYPTO_CIPHER_3DES_ECB:
@@ -851,8 +993,8 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 				DIR_ENC : DIR_DEC;
 
 	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
-					&cipherdata, NULL, ctxt->iv.length,
-			session->dir);
+					&cipherdata, NULL, session->iv.length,
+					session->dir);
 	if (bufsize < 0) {
 		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
 		goto error_out;
@@ -887,7 +1029,6 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 		    struct rte_crypto_sym_xform *xform,
 		    dpaa2_sec_session *session)
 {
-	struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata;
 	unsigned int bufsize, i;
@@ -985,7 +1126,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 
 	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
 				   1, 0, &authdata, !session->dir,
-				   ctxt->trunc_len);
+				   session->digest_length);
 
 	flc->word1_sdl = (uint8_t)bufsize;
 	flc->word2_rflc_31_0 = lower_32_bits(
@@ -997,7 +1138,8 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 	session->ctxt = priv;
 	for (i = 0; i < bufsize; i++)
 		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
-			    i, priv->flc_desc[0].desc[i]);
+			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
 
 	return 0;
 
@@ -1014,6 +1156,126 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 {
 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+	struct alginfo aeaddata;
+	unsigned int bufsize, i;
+	struct ctxt_priv *priv;
+	struct sec_flow_context *flc;
+	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+	int err;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Set IV parameters */
+	session->iv.offset = aead_xform->iv.offset;
+	session->iv.length = aead_xform->iv.length;
+	session->ctxt_type = DPAA2_SEC_AEAD;
+
+	/* For SEC AEAD only one descriptor is required */
+	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+			RTE_CACHE_LINE_SIZE);
+	if (priv == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+		return -1;
+	}
+
+	priv->fle_pool = dev_priv->fle_pool;
+	flc = &priv->flc_desc[0].flc;
+
+	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+					       RTE_CACHE_LINE_SIZE);
+	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+		RTE_LOG(ERR, PMD, "No Memory for aead key");
+		rte_free(priv);
+		return -1;
+	}
+	memcpy(session->aead_key.data, aead_xform->key.data,
+	       aead_xform->key.length);
+
+	session->digest_length = aead_xform->digest_length;
+	session->aead_key.length = aead_xform->key.length;
+	ctxt->auth_only_len = aead_xform->add_auth_data_length;
+
+	aeaddata.key = (uint64_t)session->aead_key.data;
+	aeaddata.keylen = session->aead_key.length;
+	aeaddata.key_enc_flags = 0;
+	aeaddata.key_type = RTA_DATA_IMM;
+
+	switch (aead_xform->algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		aeaddata.algtype = OP_ALG_ALGSEL_AES;
+		aeaddata.algmode = OP_ALG_AAI_GCM;
+		session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
+		break;
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u",
+			aead_xform->algo);
+		goto error_out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
+			aead_xform->algo);
+		goto error_out;
+	}
+	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+				DIR_ENC : DIR_DEC;
+
+	priv->flc_desc[0].desc[0] = aeaddata.keylen;
+	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+			       MIN_JOB_DESC_SIZE,
+			       (unsigned int *)priv->flc_desc[0].desc,
+			       &priv->flc_desc[0].desc[1], 1);
+
+	if (err < 0) {
+		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
+		goto error_out;
+	}
+	if (priv->flc_desc[0].desc[1] & 1) {
+		aeaddata.key_type = RTA_DATA_IMM;
+	} else {
+		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
+		aeaddata.key_type = RTA_DATA_PTR;
+	}
+	priv->flc_desc[0].desc[0] = 0;
+	priv->flc_desc[0].desc[1] = 0;
+
+	if (session->dir == DIR_ENC)
+		bufsize = cnstr_shdsc_gcm_encap(
+				priv->flc_desc[0].desc, 1, 0,
+				&aeaddata, session->iv.length,
+				session->digest_length);
+	else
+		bufsize = cnstr_shdsc_gcm_decap(
+				priv->flc_desc[0].desc, 1, 0,
+				&aeaddata, session->iv.length,
+				session->digest_length);
+	flc->word1_sdl = (uint8_t)bufsize;
+	flc->word2_rflc_31_0 = lower_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	flc->word3_rflc_63_32 = upper_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	session->ctxt = priv;
+	for (i = 0; i < bufsize; i++)
+		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+			    i, priv->flc_desc[0].desc[i]);
+
+	return 0;
+
+error_out:
+	rte_free(session->aead_key.data);
+	rte_free(priv);
+	return -1;
+}
+
+
+static int
+dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
+		    struct rte_crypto_sym_xform *xform,
+		    dpaa2_sec_session *session)
+{
+	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata, cipherdata;
 	unsigned int bufsize, i;
 	struct ctxt_priv *priv;
@@ -1076,7 +1338,6 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 	memcpy(session->auth_key.data, auth_xform->key.data,
 	       auth_xform->key.length);
 
-	ctxt->trunc_len = auth_xform->digest_length;
 	authdata.key = (uint64_t)session->auth_key.data;
 	authdata.keylen = session->auth_key.length;
 	authdata.key_enc_flags = 0;
@@ -1147,19 +1408,21 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
-		ctxt->iv.length = AES_CBC_IV_LEN;
 		break;
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
-		ctxt->iv.length = TDES_CBC_IV_LEN;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cipherdata.algtype = OP_ALG_ALGSEL_AES;
+		cipherdata.algmode = OP_ALG_AAI_CTR;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
 		break;
 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
 	case RTE_CRYPTO_CIPHER_NULL:
 	case RTE_CRYPTO_CIPHER_3DES_ECB:
 	case RTE_CRYPTO_CIPHER_AES_ECB:
-	case RTE_CRYPTO_CIPHER_AES_CTR:
 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
 		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
 			cipher_xform->algo);
@@ -1202,9 +1465,9 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
 					      0, &cipherdata, &authdata,
-					      ctxt->iv.length,
+					      session->iv.length,
 					      ctxt->auth_only_len,
-					      ctxt->trunc_len,
+					      session->digest_length,
 					      session->dir);
 	} else {
 		RTE_LOG(ERR, PMD, "Hash before cipher not supported");
@@ -1221,8 +1484,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 	session->ctxt = priv;
 	for (i = 0; i < bufsize; i++)
 		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
-			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);
-
+			    i, priv->flc_desc[0].desc[i]);
 
 	return 0;
 
@@ -1264,13 +1526,19 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		session->ext_params.aead_ctxt.auth_cipher_text = true;
-		dpaa2_sec_aead_init(dev, xform, session);
+		dpaa2_sec_aead_chain_init(dev, xform, session);
 
 	/* Authenticate then Cipher */
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		session->ext_params.aead_ctxt.auth_cipher_text = false;
+		dpaa2_sec_aead_chain_init(dev, xform, session);
+
+	/* AEAD operation for AES-GCM kind of Algorithms */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+		   xform->next == NULL) {
 		dpaa2_sec_aead_init(dev, xform, session);
+
 	} else {
 		RTE_LOG(ERR, PMD, "Invalid crypto type");
 		return NULL;
@@ -1300,7 +1568,7 @@ dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
 {
 	PMD_INIT_FUNC_TRACE();
 
-	return -ENOTSUP;
+	return 0;
 }
 
 static int
@@ -1626,7 +1894,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 }
 
 static int
-cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
+cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
 			  struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_cryptodev *cryptodev;
@@ -1654,6 +1922,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
 
 	dpaa2_dev->cryptodev = cryptodev;
 	cryptodev->device = &dpaa2_dev->device;
+	cryptodev->device->driver = &dpaa2_drv->driver;
 
 	/* init user callbacks */
 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index b4dfe24..a477404 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -137,6 +137,7 @@ enum dpaa2_sec_op_type {
 	DPAA2_SEC_NONE,  /*!< No Cipher operations*/
 	DPAA2_SEC_CIPHER,/*!< CIPHER operations */
 	DPAA2_SEC_AUTH,  /*!< Authentication Operations */
+	DPAA2_SEC_AEAD,  /*!< AEAD (AES-GCM/CCM) type operations */
 	DPAA2_SEC_CIPHER_HASH,  /*!< Authenticated Encryption with
 				 * associated data
 				 */
@@ -149,30 +150,9 @@ enum dpaa2_sec_op_type {
 	DPAA2_SEC_MAX
 };
 
-struct dpaa2_sec_cipher_ctxt {
-	struct {
-		uint8_t *data;
-		uint16_t length;
-	} iv;	/**< Initialisation vector parameters */
-	uint8_t *init_counter;  /*!< Set initial counter for CTR mode */
-};
-
-struct dpaa2_sec_auth_ctxt {
-	uint8_t trunc_len;              /*!< Length for output ICV, should
-					 * be 0 if no truncation required
-					 */
-};
-
 struct dpaa2_sec_aead_ctxt {
-	struct {
-		uint8_t *data;
-		uint16_t length;
-	} iv;	/**< Initialisation vector parameters */
 	uint16_t auth_only_len; /*!< Length of data for Auth only */
 	uint8_t auth_cipher_text;       /**< Authenticate/cipher ordering */
-	uint8_t trunc_len;              /*!< Length for output ICV, should
-					 * be 0 if no truncation required
-					 */
 };
 
 typedef struct dpaa2_sec_session_entry {
@@ -181,14 +161,22 @@ typedef struct dpaa2_sec_session_entry {
 	uint8_t dir;         /*!< Operation Direction */
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
-	struct {
-		uint8_t *data;	/**< pointer to key data */
-		size_t length;	/**< key length in bytes */
-	} cipher_key;
-	struct {
-		uint8_t *data;	/**< pointer to key data */
-		size_t length;	/**< key length in bytes */
-	} auth_key;
+	union {
+		struct {
+			uint8_t *data;	/**< pointer to key data */
+			size_t length;	/**< key length in bytes */
+		} aead_key;
+		struct {
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} cipher_key;
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} auth_key;
+		};
+	};
 	struct {
 		uint16_t length; /**< IV length in bytes */
 		uint16_t offset; /**< IV offset in bytes */
@@ -196,8 +184,6 @@ typedef struct dpaa2_sec_session_entry {
 	uint16_t digest_length;
 	uint8_t status;
 	union {
-		struct dpaa2_sec_cipher_ctxt cipher_ctxt;
-		struct dpaa2_sec_auth_ctxt auth_ctxt;
 		struct dpaa2_sec_aead_ctxt aead_ctxt;
 	} ext_params;
 } dpaa2_sec_session;
@@ -335,6 +321,36 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
 	{	/* AES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -355,6 +371,26 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
 	{	/* 3DES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.9.3


* [PATCH 4/5] test/test: add test cases for gcm and ctr in dpaa2_sec test suite
  2017-06-29 20:48 [PATCH 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
  2017-06-29 20:49 ` [PATCH 1/5] crypto/dpaa2_sec: add per device mempool to store frame list entries akhil.goyal
  2017-06-29 20:49 ` [PATCH 3/5] crypto/dpaa2_sec: add support for AES-GCM and CTR akhil.goyal
@ 2017-06-29 20:49 ` akhil.goyal
  2017-06-29 20:49 ` [PATCH 5/5] doc: update documentation for dpaa2_sec supported algos akhil.goyal
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 21+ messages in thread
From: akhil.goyal @ 2017-06-29 20:49 UTC (permalink / raw)
  To: dev; +Cc: hemant.agrawal, pablo.de.lara.guarch, declan.doherty, Akhil Goyal

From: Akhil Goyal <akhil.goyal@nxp.com>

The dpaa2_sec test cases are updated for the various supported
crypto algorithms.

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 test/test/test_cryptodev.c                   | 94 ++++++++++++++++++++++++----
 test/test/test_cryptodev_aes_test_vectors.h  | 78 +++++++++++++++--------
 test/test/test_cryptodev_blockcipher.c       |  1 +
 test/test/test_cryptodev_des_test_vectors.h  | 24 ++++---
 test/test/test_cryptodev_hash_test_vectors.h | 36 +++++++----
 5 files changed, 176 insertions(+), 57 deletions(-)

diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index db0999e..fe6c8dd 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -1738,6 +1738,22 @@ test_AES_cipheronly_dpaa2_sec_all(void)
 }
 
 static int
+test_authonly_dpaa2_sec_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool, ts_params->valid_devs[0],
+		RTE_CRYPTODEV_DPAA2_SEC_PMD,
+		BLKCIPHER_AUTHONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_authonly_openssl_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -8280,28 +8296,84 @@ static struct unit_test_suite cryptodev_dpaa2_sec_testsuite  = {
 	.teardown = testsuite_teardown,
 	.unit_test_cases = {
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_device_configure_invalid_dev_id),
+			test_device_configure_invalid_dev_id),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_multi_session),
+			test_multi_session),
 
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_AES_chain_dpaa2_sec_all),
+			test_AES_chain_dpaa2_sec_all),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_3DES_chain_dpaa2_sec_all),
+			test_3DES_chain_dpaa2_sec_all),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_AES_cipheronly_dpaa2_sec_all),
+			test_AES_cipheronly_dpaa2_sec_all),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_3DES_cipheronly_dpaa2_sec_all),
+			test_3DES_cipheronly_dpaa2_sec_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_authonly_dpaa2_sec_all),
 
-		/** HMAC_MD5 Authentication */
+		/** AES GCM Authenticated Encryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_7),
+
+		/** AES GCM Authenticated Decryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_2),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_generate_case_1),
+			test_mb_AES_GCM_authenticated_decryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_7),
+
+		/** AES GCM Authenticated Encryption 256 bits key */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_4),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_verify_case_1),
+			test_mb_AES_GCM_auth_encryption_test_case_256_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_7),
+
+		/** AES GCM Authenticated Decryption 256 bits key */
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_generate_case_2),
+			test_mb_AES_GCM_auth_decryption_test_case_256_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_2),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_verify_case_2),
+			test_mb_AES_GCM_auth_decryption_test_case_256_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_7),
 
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 07d6eab..f692d57 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1028,7 +1028,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1038,7 +1039,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1074,7 +1076,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1084,7 +1087,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1094,7 +1098,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1124,7 +1129,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1141,7 +1147,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
@@ -1159,7 +1166,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1175,7 +1183,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1205,7 +1214,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1262,7 +1272,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1272,7 +1283,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1281,7 +1293,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1291,7 +1304,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1322,7 +1336,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC Decryption",
@@ -1331,7 +1346,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption",
@@ -1340,7 +1356,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption Scater gather",
@@ -1357,7 +1374,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CBC Encryption",
@@ -1366,7 +1384,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CBC Decryption",
@@ -1375,7 +1394,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Encryption",
@@ -1400,7 +1420,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CTR Decryption",
@@ -1409,7 +1430,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CTR Encryption",
@@ -1418,7 +1440,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CTR Decryption",
@@ -1427,7 +1450,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CTR Encryption",
@@ -1436,7 +1460,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CTR Decryption",
@@ -1445,7 +1470,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 };
 
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index 446ab4f..85fad01 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -100,6 +100,7 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 			tdata->auth_key.len);
 
 	switch (cryptodev_type) {
+	case RTE_CRYPTODEV_DPAA2_SEC_PMD:
 	case RTE_CRYPTODEV_QAT_SYM_PMD:
 	case RTE_CRYPTODEV_OPENSSL_PMD:
 	case RTE_CRYPTODEV_ARMV8_PMD: /* Fall through */
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index b226794..0b6e0b8 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -1058,14 +1058,16 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
@@ -1084,14 +1086,16 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des192cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
 		.test_data = &triple_des192cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
@@ -1199,28 +1203,32 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.test_data = &triple_des128cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CBC Decryption",
 		.test_data = &triple_des128cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC Encryption",
 		.test_data = &triple_des192cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC Decryption",
 		.test_data = &triple_des192cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 3214f9a..24353fc 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -366,7 +366,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-MD5 Digest Verify",
@@ -374,7 +375,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA1 Digest",
@@ -394,7 +396,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Verify",
@@ -402,7 +405,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA224 Digest",
@@ -422,7 +426,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest Verify",
@@ -430,7 +435,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA256 Digest",
@@ -450,7 +456,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest Verify",
@@ -458,7 +465,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA384 Digest",
@@ -478,7 +486,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest Verify",
@@ -486,7 +495,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA512 Digest",
@@ -506,7 +516,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest Verify",
@@ -514,7 +525,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 };
 
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH 5/5] doc: update documentation for dpaa2_sec supported algos
  2017-06-29 20:48 [PATCH 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
                   ` (2 preceding siblings ...)
  2017-06-29 20:49 ` [PATCH 4/5] test/test: add test cases for gcm and ctr in dpaa2_sec test suite akhil.goyal
@ 2017-06-29 20:49 ` akhil.goyal
  2017-06-29 21:07   ` De Lara Guarch, Pablo
  2017-06-30  7:43 ` [PATCH v2 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
  2017-07-02 23:43 ` [PATCH " De Lara Guarch, Pablo
  5 siblings, 1 reply; 21+ messages in thread
From: akhil.goyal @ 2017-06-29 20:49 UTC (permalink / raw)
  To: dev; +Cc: hemant.agrawal, pablo.de.lara.guarch, declan.doherty, Akhil Goyal

From: Akhil Goyal <akhil.goyal@nxp.com>

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 doc/guides/cryptodevs/dpaa2_sec.rst          | 9 ++++++++-
 doc/guides/cryptodevs/features/dpaa2_sec.ini | 6 ++++++
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index becb910..1444a91 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -126,7 +126,7 @@ fits in the DPAA2 Bus model
 Features
 --------
 
-The DPAA2 PMD has support for:
+The DPAA2_SEC PMD has support for:
 
 Cipher algorithms:
 
@@ -134,6 +134,9 @@ Cipher algorithms:
 * ``RTE_CRYPTO_CIPHER_AES128_CBC``
 * ``RTE_CRYPTO_CIPHER_AES192_CBC``
 * ``RTE_CRYPTO_CIPHER_AES256_CBC``
+* ``RTE_CRYPTO_CIPHER_AES128_CTR``
+* ``RTE_CRYPTO_CIPHER_AES192_CTR``
+* ``RTE_CRYPTO_CIPHER_AES256_CTR``
 
 Hash algorithms:
 
@@ -144,6 +147,10 @@ Hash algorithms:
 * ``RTE_CRYPTO_AUTH_SHA512_HMAC``
 * ``RTE_CRYPTO_AUTH_MD5_HMAC``
 
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
 Supported DPAA2 SoCs
 --------------------
 
diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
index db0ea4f..9eb07aa 100644
--- a/doc/guides/cryptodevs/features/dpaa2_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -15,6 +15,9 @@ HW Accelerated         = Y
 AES CBC (128) = Y
 AES CBC (192) = Y
 AES CBC (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
 3DES CBC      = Y
 
 ;
@@ -32,3 +35,6 @@ SHA512 HMAC  = Y
 ; Supported AEAD algorithms of the 'openssl' crypto driver.
 ;
 [AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [PATCH 5/5] doc: update documentation for dpaa2_sec supported algos
  2017-06-29 20:49 ` [PATCH 5/5] doc: update documentation for dpaa2_sec supported algos akhil.goyal
@ 2017-06-29 21:07   ` De Lara Guarch, Pablo
  2017-06-29 21:54     ` Akhil Goyal
  0 siblings, 1 reply; 21+ messages in thread
From: De Lara Guarch, Pablo @ 2017-06-29 21:07 UTC (permalink / raw)
  To: akhil.goyal, dev; +Cc: hemant.agrawal, Doherty, Declan

Hi Akhil

> -----Original Message-----
> From: akhil.goyal@nxp.com [mailto:akhil.goyal@nxp.com]
> Sent: Thursday, June 29, 2017 9:49 PM
> To: dev@dpdk.org
> Cc: hemant.agrawal@nxp.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Doherty, Declan
> <declan.doherty@intel.com>; Akhil Goyal <akhil.goyal@nxp.com>
> Subject: [PATCH 5/5] doc: update documentation for dpaa2_sec supported
> algos
> 
> From: Akhil Goyal <akhil.goyal@nxp.com>
> 
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>

Patch number 3 adds these algorithms, so I believe that's the right place to include
this documentation change.

Also, you should add an entry in release notes.

Lastly, I spotted a typo below: openssl -> dpaa2_sec

...
> 
>  ;
> @@ -32,3 +35,6 @@ SHA512 HMAC  = Y
>  ; Supported AEAD algorithms of the 'openssl' crypto driver.

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [PATCH 5/5] doc: update documentation for dpaa2_sec supported algos
  2017-06-29 21:07   ` De Lara Guarch, Pablo
@ 2017-06-29 21:54     ` Akhil Goyal
  0 siblings, 0 replies; 21+ messages in thread
From: Akhil Goyal @ 2017-06-29 21:54 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, dev; +Cc: hemant.agrawal, Doherty, Declan

Hi Pablo,

On 6/30/2017 2:37 AM, De Lara Guarch, Pablo wrote:
> Hi Akhil
>
>> -----Original Message-----
>> From: akhil.goyal@nxp.com [mailto:akhil.goyal@nxp.com]
>> Sent: Thursday, June 29, 2017 9:49 PM
>> To: dev@dpdk.org
>> Cc: hemant.agrawal@nxp.com; De Lara Guarch, Pablo
>> <pablo.de.lara.guarch@intel.com>; Doherty, Declan
>> <declan.doherty@intel.com>; Akhil Goyal <akhil.goyal@nxp.com>
>> Subject: [PATCH 5/5] doc: update documentation for dpaa2_sec supported
>> algos
>>
>> From: Akhil Goyal <akhil.goyal@nxp.com>
>>
>> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
>
> Patch number 3 adds these algorithms, so I believe that's the right place to include
> this documentation change.
>
> Also, you should add an entry in release notes.
>
> Lastly, I spotted a typo below: openssl -> dpaa2_sec
>
> ...
>>
>>  ;
>> @@ -32,3 +35,6 @@ SHA512 HMAC  = Y
>>  ; Supported AEAD algorithms of the 'openssl' crypto driver.
>
>
Thanks for the quick review.
I will update the patch set and send again.

Also, I can see that my 2nd patch is missing. All my patches are in the same 
location, but somehow that one was not received, even though git-send-email 
reported that all the patches were successfully accepted for delivery.
I will send the patch set again as soon as this problem is resolved.

Regards,
Akhil

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH v2 0/5] crypto/dpaa2_sec optimization and feature update
  2017-06-29 20:48 [PATCH 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
                   ` (3 preceding siblings ...)
  2017-06-29 20:49 ` [PATCH 5/5] doc: update documentation for dpaa2_sec supported algos akhil.goyal
@ 2017-06-30  7:43 ` akhil.goyal
  2017-06-30  7:43   ` [PATCH v2 1/5] crypto/dpaa2_sec: add per device mempool to store frame list entries akhil.goyal
                     ` (5 more replies)
  2017-07-02 23:43 ` [PATCH " De Lara Guarch, Pablo
  5 siblings, 6 replies; 21+ messages in thread
From: akhil.goyal @ 2017-06-30  7:43 UTC (permalink / raw)
  To: dev; +Cc: hemant.agrawal, pablo.de.lara.guarch, declan.doherty, Akhil Goyal

From: Akhil Goyal <akhil.goyal@nxp.com>

This patchset updates dpaa2_sec crypto driver with following:
- optimization in data path for memory allocation
- add support for additional AES algorithms like AES-GCM and AES-CTR
- Update test cases in test_cryptodev for all the supported test cases.
- Update documentation for supported algorithms

The patches are based on dpdk-crypto-next and are rebased over the
latest crypto restructuring changes by Pablo.
http://dpdk.org/ml/archives/dev/2017-June/069372.html

changes in v2:
-fixed typo in doc
-split 2nd patch in two
-squashed documentation patch into the 4th patch
-fixed checkpatch errors/warnings

Akhil Goyal (5):
  crypto/dpaa2_sec: add per device mempool to store frame list entries
  crypto/dpaa2_sec: add hw desc support for CTR
  crypto/dpaa2_sec: add hw desc support for AES-GCM
  crypto/dpaa2_sec: add support for AES-GCM and CTR
  test/test: add test cases for gcm and ctr in dpaa2_sec test suite

 doc/guides/cryptodevs/dpaa2_sec.rst          |   9 +-
 doc/guides/cryptodevs/features/dpaa2_sec.ini |   8 +-
 doc/guides/rel_notes/release_17_08.rst       |   4 +
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h      |   7 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 385 ++++++++++++++++++++++++---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    | 100 ++++---
 drivers/crypto/dpaa2_sec/hw/desc/algo.h      | 228 +++++++++++++++-
 drivers/crypto/dpaa2_sec/hw/desc/ipsec.h     |  19 +-
 test/test/test_cryptodev.c                   |  94 ++++++-
 test/test/test_cryptodev_aes_test_vectors.h  |  78 ++++--
 test/test/test_cryptodev_blockcipher.c       |   1 +
 test/test/test_cryptodev_des_test_vectors.h  |  24 +-
 test/test/test_cryptodev_hash_test_vectors.h |  36 ++-
 13 files changed, 854 insertions(+), 139 deletions(-)

-- 
2.9.3

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH v2 1/5] crypto/dpaa2_sec: add per device mempool to store frame list entries
  2017-06-30  7:43 ` [PATCH v2 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
@ 2017-06-30  7:43   ` akhil.goyal
  2017-06-30  7:43   ` [PATCH v2 2/5] crypto/dpaa2_sec: add hw desc support for CTR akhil.goyal
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 21+ messages in thread
From: akhil.goyal @ 2017-06-30  7:43 UTC (permalink / raw)
  To: dev; +Cc: hemant.agrawal, pablo.de.lara.guarch, declan.doherty, Akhil Goyal

From: Akhil Goyal <akhil.goyal@nxp.com>

rte_malloc uses a common memory area for all cores.

The rte_malloc calls are now replaced by a per-device mempool to allocate
space for FLEs. This removes contention and improves performance.
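
A minimal sketch of the resulting allocation pattern, assuming the pool
sizing constants from the hunks below (the helper names here are
illustrative only, not part of the patch):

#include <stdio.h>
#include <rte_memory.h>
#include <rte_mempool.h>

#define FLE_POOL_NUM_BUFS	32000	/* as set for the ipsec-secgw use case */
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512

/* Device init: one FLE pool per SEC device instead of global rte_malloc. */
static struct rte_mempool *
fle_pool_create(int dev_id)
{
	char name[32];

	snprintf(name, sizeof(name), "fle_pool_%d", dev_id);
	return rte_mempool_create(name, FLE_POOL_NUM_BUFS,
				  FLE_POOL_BUF_SIZE, FLE_POOL_CACHE_SIZE,
				  0, NULL, NULL, NULL, NULL,
				  SOCKET_ID_ANY, 0);
}

/* Data path: per-core cached get/put replaces rte_zmalloc()/rte_free(). */
static int
fle_pool_example(struct rte_mempool *fle_pool)
{
	void *fle;

	if (rte_mempool_get(fle_pool, &fle))
		return -1;		/* pool exhausted */
	/* ... build the frame list entries in 'fle' ... */
	rte_mempool_put(fle_pool, fle);
	return 0;
}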

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h     |  7 +++
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 79 ++++++++++++++++++++++-------
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  2 +
 3 files changed, 70 insertions(+), 18 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 429eaee..16cadf5 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -182,6 +182,13 @@ struct qbman_fle {
 	fle->addr_lo = lower_32_bits((uint64_t)addr);     \
 	fle->addr_hi = upper_32_bits((uint64_t)addr);	  \
 } while (0)
+#define DPAA2_GET_FLE_CTXT(fle)					\
+	(uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \
+			(fle)->reserved[0])
+#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
+	fle->reserved[0] = lower_32_bits((uint64_t)addr);     \
+	fle->reserved[1] = upper_32_bits((uint64_t)addr);	  \
+} while (0)
 #define DPAA2_SET_FLE_OFFSET(fle, offset) \
 	((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
 #define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid)
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 3620751..dbdaf46 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -71,6 +71,13 @@
 #define NO_PREFETCH 0
 #define TDES_CBC_IV_LEN 8
 #define AES_CBC_IV_LEN 16
+#define AES_CTR_IV_LEN 16
+#define AES_GCM_IV_LEN 12
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS	32000
+#define FLE_POOL_BUF_SIZE	256
+#define FLE_POOL_CACHE_SIZE	512
+
 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
 
 static inline int
@@ -84,9 +91,8 @@ build_authenc_fd(dpaa2_sec_session *sess,
 	struct sec_flow_context *flc;
 	uint32_t auth_only_len = sym_op->auth.data.length -
 				sym_op->cipher.data.length;
-	int icv_len = sess->digest_length;
+	int icv_len = sess->digest_length, retval;
 	uint8_t *old_icv;
-	uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
 			sess->iv.offset);
 
@@ -98,12 +104,14 @@ build_authenc_fd(dpaa2_sec_session *sess,
 	 * to get the MBUF Addr from the previous FLE.
 	 * We can have a better approach to use the inline Mbuf
 	 */
-	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
-	if (!fle) {
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
 		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
 		return -1;
 	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
 	fle = fle + 1;
 	sge = fle + 2;
 	if (likely(bpid < MAX_BPID)) {
@@ -214,21 +222,19 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 {
 	struct rte_crypto_sym_op *sym_op = op->sym;
 	struct qbman_fle *fle, *sge;
-	uint32_t mem_len = (sess->dir == DIR_ENC) ?
-			   (3 * sizeof(struct qbman_fle)) :
-			   (5 * sizeof(struct qbman_fle) +
-			    sess->digest_length);
 	struct sec_flow_context *flc;
 	struct ctxt_priv *priv = sess->ctxt;
 	uint8_t *old_digest;
+	int retval;
 
 	PMD_INIT_FUNC_TRACE();
 
-	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
-	if (!fle) {
-		RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
 		return -1;
 	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
 	/* TODO we are using the first FLE entry to store Mbuf.
 	 * Currently we donot know which FLE has the mbuf stored.
 	 * So while retreiving we can go back 1 FLE from the FD -ADDR
@@ -236,6 +242,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	 * We can have a better approach to use the inline Mbuf
 	 */
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
 	fle = fle + 1;
 
 	if (likely(bpid < MAX_BPID)) {
@@ -306,7 +313,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 {
 	struct rte_crypto_sym_op *sym_op = op->sym;
 	struct qbman_fle *fle, *sge;
-	uint32_t mem_len = (5 * sizeof(struct qbman_fle));
+	int retval;
 	struct sec_flow_context *flc;
 	struct ctxt_priv *priv = sess->ctxt;
 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
@@ -314,12 +321,12 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 	PMD_INIT_FUNC_TRACE();
 
-	/* todo - we can use some mempool to avoid malloc here */
-	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
-	if (!fle) {
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
 		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
 		return -1;
 	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
 	/* TODO we are using the first FLE entry to store Mbuf.
 	 * Currently we donot know which FLE has the mbuf stored.
 	 * So while retreiving we can go back 1 FLE from the FD -ADDR
@@ -327,6 +334,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	 * We can have a better approach to use the inline Mbuf
 	 */
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
 	fle = fle + 1;
 	sge = fle + 2;
 
@@ -499,6 +507,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 {
 	struct qbman_fle *fle;
 	struct rte_crypto_op *op;
+	struct ctxt_priv *priv;
 
 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
 
@@ -534,7 +543,8 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 		   DPAA2_GET_FD_LEN(fd));
 
 	/* free the fle memory */
-	rte_free(fle - 1);
+	priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+	rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
 
 	return op;
 }
@@ -764,6 +774,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		      dpaa2_sec_session *session)
 {
 	struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo cipherdata;
 	int bufsize, i;
 	struct ctxt_priv *priv;
@@ -780,6 +791,8 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		return -1;
 	}
 
+	priv->fle_pool = dev_priv->fle_pool;
+
 	flc = &priv->flc_desc[0].flc;
 
 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
@@ -875,8 +888,9 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 		    dpaa2_sec_session *session)
 {
 	struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata;
-	unsigned int bufsize;
+	unsigned int bufsize, i;
 	struct ctxt_priv *priv;
 	struct sec_flow_context *flc;
 
@@ -892,6 +906,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 		return -1;
 	}
 
+	priv->fle_pool = dev_priv->fle_pool;
 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
 
 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
@@ -980,6 +995,9 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 			(uint64_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	session->ctxt = priv;
+	for (i = 0; i < bufsize; i++)
+		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+			    i, priv->flc_desc[0].desc[i]);
 
 	return 0;
 
@@ -995,8 +1013,9 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 		    dpaa2_sec_session *session)
 {
 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata, cipherdata;
-	unsigned int bufsize;
+	unsigned int bufsize, i;
 	struct ctxt_priv *priv;
 	struct sec_flow_context *flc;
 	struct rte_crypto_cipher_xform *cipher_xform;
@@ -1032,6 +1051,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 		return -1;
 	}
 
+	priv->fle_pool = dev_priv->fle_pool;
 	flc = &priv->flc_desc[0].flc;
 
 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
@@ -1199,6 +1219,10 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 			(uint64_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	session->ctxt = priv;
+	for (i = 0; i < bufsize; i++)
+		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
 
 	return 0;
 
@@ -1496,6 +1520,10 @@ static struct rte_cryptodev_ops crypto_ops = {
 static int
 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
 {
+	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+	rte_mempool_free(internals->fle_pool);
+
 	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
 		     dev->data->name, rte_socket_id());
 
@@ -1512,6 +1540,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 	uint16_t token;
 	struct dpseci_attr attr;
 	int retcode, hw_id;
+	char str[20];
 
 	PMD_INIT_FUNC_TRACE();
 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
@@ -1572,6 +1601,20 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 	internals->hw = dpseci;
 	internals->token = token;
 
+	sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
+	internals->fle_pool = rte_mempool_create((const char *)str,
+			FLE_POOL_NUM_BUFS,
+			FLE_POOL_BUF_SIZE,
+			FLE_POOL_CACHE_SIZE, 0,
+			NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
+	if (!internals->fle_pool) {
+		RTE_LOG(ERR, PMD, "%s create failed", str);
+		goto init_error;
+	} else
+		RTE_LOG(INFO, PMD, "%s created: %p\n", str,
+				internals->fle_pool);
+
 	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
 	return 0;
 
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index eda2eec..b4dfe24 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -40,6 +40,7 @@
 struct dpaa2_sec_dev_private {
 	void *mc_portal; /**< MC Portal for configuring this device */
 	void *hw; /**< Hardware handle for this device.Used by NADK framework */
+	struct rte_mempool *fle_pool; /* per device memory pool for FLE */
 	int32_t hw_id; /**< An unique ID of this device instance */
 	int32_t vfio_fd; /**< File descriptor received via VFIO */
 	uint16_t token; /**< Token required by DPxxx objects */
@@ -128,6 +129,7 @@ struct sec_flc_desc {
 };
 
 struct ctxt_priv {
+	struct rte_mempool *fle_pool; /* per device memory pool for FLE */
 	struct sec_flc_desc flc_desc[0];
 };
 
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH v2 2/5] crypto/dpaa2_sec: add hw desc support for CTR
  2017-06-30  7:43 ` [PATCH v2 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
  2017-06-30  7:43   ` [PATCH v2 1/5] crypto/dpaa2_sec: add per device mempool to store frame list entries akhil.goyal
@ 2017-06-30  7:43   ` akhil.goyal
  2017-06-30  7:43   ` [PATCH v2 3/5] crypto/dpaa2_sec: add hw desc support for AES-GCM akhil.goyal
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 21+ messages in thread
From: akhil.goyal @ 2017-06-30  7:43 UTC (permalink / raw)
  To: dev; +Cc: hemant.agrawal, pablo.de.lara.guarch, declan.doherty, Akhil Goyal

From: Akhil Goyal <akhil.goyal@nxp.com>

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa2_sec/hw/desc/algo.h  | 23 ++++++++++++++++-------
 drivers/crypto/dpaa2_sec/hw/desc/ipsec.h | 19 +++++++++++++------
 2 files changed, 29 insertions(+), 13 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
index c71ada0..166bc3a 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/algo.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -159,6 +159,10 @@ cnstr_shdsc_snow_f9(uint32_t *descbuf, bool ps, bool swap,
  * @ps: if 36/40bit addressing is desired, this parameter must be true
  * @swap: must be true when core endianness doesn't match SEC endianness
  * @cipherdata: pointer to block cipher transform definitions
+ *              Valid algorithm values one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ *              Valid modes for:
+ *                  AES: OP_ALG_AAI_* {CBC, CTR}
+ *                  DES, 3DES: OP_ALG_AAI_CBC
  * @iv: IV data; if NULL, "ivlen" bytes from the input frame will be read as IV
  * @ivlen: IV length
  * @dir: DIR_ENC/DIR_DEC
@@ -172,8 +176,10 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
 {
 	struct program prg;
 	struct program *p = &prg;
-	const bool is_aes_dec = (dir == DIR_DEC) &&
-				(cipherdata->algtype == OP_ALG_ALGSEL_AES);
+	uint32_t iv_off = 0;
+	const bool need_dk = (dir == DIR_DEC) &&
+			     (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+			     (cipherdata->algmode == OP_ALG_AAI_CBC);
 	LABEL(keyjmp);
 	LABEL(skipdk);
 	REFERENCE(pkeyjmp);
@@ -191,7 +197,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-	if (is_aes_dec) {
+	if (need_dk) {
 		ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
 			      OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
 
@@ -199,7 +205,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
 	}
 	SET_LABEL(p, keyjmp);
 
-	if (is_aes_dec) {
+	if (need_dk) {
 		ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
 			      OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
 			      ICV_CHECK_DISABLE, dir);
@@ -209,12 +215,15 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
 			      OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
 	}
 
+	if (cipherdata->algmode == OP_ALG_AAI_CTR)
+		iv_off = 16;
+
 	if (iv)
 		/* IV load, convert size */
-		LOAD(p, (uintptr_t)iv, CONTEXT1, 0, ivlen, IMMED | COPY);
+		LOAD(p, (uintptr_t)iv, CONTEXT1, iv_off, ivlen, IMMED | COPY);
 	else
 		/* IV is present first before the actual message */
-		SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+		SEQLOAD(p, CONTEXT1, iv_off, ivlen, 0);
 
 	MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
 	MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
@@ -224,7 +233,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
 	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
 	PATCH_JUMP(p, pkeyjmp, keyjmp);
-	if (is_aes_dec)
+	if (need_dk)
 		PATCH_JUMP(p, pskipdk, skipdk);
 
 	return PROGRAM_FINALIZE(p);
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
index c63d0da..5954055 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -1311,8 +1311,11 @@ cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
  * @descbuf: pointer to buffer used for descriptor construction
  * @ps: if 36/40bit addressing is desired, this parameter must be true
  * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
- * @cipherdata: ointer to block cipher transform definitions.
+ * @cipherdata: pointer to block cipher transform definitions.
  *              Valid algorithm values one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ *              Valid modes for:
+ *                  AES: OP_ALG_AAI_* {CBC, CTR}
+ *                  DES, 3DES: OP_ALG_AAI_CBC
  * @authdata: pointer to authentication transform definitions.
  *            Valid algorithm values - one of OP_ALG_ALGSEL_* {MD5, SHA1,
  *            SHA224, SHA256, SHA384, SHA512}
@@ -1379,8 +1382,9 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
 {
 	struct program prg;
 	struct program *p = &prg;
-	const bool is_aes_dec = (dir == DIR_DEC) &&
-				(cipherdata->algtype == OP_ALG_ALGSEL_AES);
+	const bool need_dk = (dir == DIR_DEC) &&
+			     (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+			     (cipherdata->algmode == OP_ALG_AAI_CBC);
 
 	LABEL(skip_patch_len);
 	LABEL(keyjmp);
@@ -1466,7 +1470,7 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
 		      dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 		      dir);
 
-	if (is_aes_dec)
+	if (need_dk)
 		ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode,
 			      OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
 	pskipkeys = JUMP(p, skipkeys, LOCAL_JUMP, ALL_TRUE, 0);
@@ -1478,7 +1482,7 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
 		      dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 		      dir);
 
-	if (is_aes_dec) {
+	if (need_dk) {
 		ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
 			      OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
 			      ICV_CHECK_DISABLE, dir);
@@ -1503,7 +1507,10 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
 	SET_LABEL(p, aonly_len_offset);
 
 	/* Read IV */
-	SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+	if (cipherdata->algmode == OP_ALG_AAI_CTR)
+		SEQLOAD(p, CONTEXT1, 16, ivlen, 0);
+	else
+		SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
 
 	/*
 	 * Read data needed only for authentication. This is overwritten above
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH v2 3/5] crypto/dpaa2_sec: add hw desc support for AES-GCM
  2017-06-30  7:43 ` [PATCH v2 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
  2017-06-30  7:43   ` [PATCH v2 1/5] crypto/dpaa2_sec: add per device mempool to store frame list entries akhil.goyal
  2017-06-30  7:43   ` [PATCH v2 2/5] crypto/dpaa2_sec: add hw desc support for CTR akhil.goyal
@ 2017-06-30  7:43   ` akhil.goyal
  2017-06-30  7:43   ` [PATCH v2 4/5] crypto/dpaa2_sec: add support for AES-GCM and CTR akhil.goyal
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 21+ messages in thread
From: akhil.goyal @ 2017-06-30  7:43 UTC (permalink / raw)
  To: dev; +Cc: hemant.agrawal, pablo.de.lara.guarch, declan.doherty, Akhil Goyal

From: Akhil Goyal <akhil.goyal@nxp.com>

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa2_sec/hw/desc/algo.h | 205 ++++++++++++++++++++++++++++++++
 1 file changed, 205 insertions(+)

diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
index 166bc3a..cb633ed 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/algo.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -443,6 +443,211 @@ cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
 }
 
 /**
+ * cnstr_shdsc_gcm_encap - AES-GCM encap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ *		Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with
+ *		OP_ALG_AAI_GCM.
+ * @ivlen: Initialization vector length
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_encap(uint32_t *descbuf, bool ps, bool swap,
+		      struct alginfo *cipherdata,
+		      uint32_t ivlen, uint32_t icvsize)
+{
+	struct program prg;
+	struct program *p = &prg;
+
+	LABEL(keyjmp);
+	LABEL(zeroassocjump2);
+	LABEL(zeroassocjump1);
+	LABEL(zeropayloadjump);
+	REFERENCE(pkeyjmp);
+	REFERENCE(pzeroassocjump2);
+	REFERENCE(pzeroassocjump1);
+	REFERENCE(pzeropayloadjump);
+
+	PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+	if (swap)
+		PROGRAM_SET_BSWAP(p);
+	if (ps)
+		PROGRAM_SET_36BIT_ADDR(p);
+
+	SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+	pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+	/* Insert Key */
+	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+	    cipherdata->keylen, INLINE_KEY(cipherdata));
+
+	SET_LABEL(p, keyjmp);
+
+	/* class 1 operation */
+	ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+		      OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+
+	MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+
+	/* if assoclen + cryptlen is ZERO, skip to ICV write */
+	MATHB(p, SEQINSZ, SUB, ivlen, VSEQOUTSZ, 4, IMMED2);
+	pzeroassocjump2 = JUMP(p, zeroassocjump2, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+	SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+	/* if assoclen is ZERO, skip reading the assoc data */
+	MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+	pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+	MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+	/* skip assoc data */
+	SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+	/* cryptlen = seqinlen - assoclen */
+	MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+	/* if cryptlen is ZERO jump to zero-payload commands */
+	pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+				MATH_Z);
+
+	/* read assoc data */
+	SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+	SET_LABEL(p, zeroassocjump1);
+
+	MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+	/* write encrypted data */
+	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+	/* read payload data */
+	SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | LAST1);
+
+	/* jump the zero-payload commands */
+	JUMP(p, 4, LOCAL_JUMP, ALL_TRUE, 0);
+
+	/* zero-payload commands */
+	SET_LABEL(p, zeropayloadjump);
+
+	/* read assoc data */
+	SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | LAST1);
+
+	JUMP(p, 2, LOCAL_JUMP, ALL_TRUE, 0);
+
+	/* There is no input data */
+	SET_LABEL(p, zeroassocjump2);
+
+	SEQFIFOLOAD(p, IV1, ivlen, FLUSH1 | LAST1);
+
+	/* write ICV */
+	SEQSTORE(p, CONTEXT1, 0, icvsize, 0);
+
+	PATCH_JUMP(p, pkeyjmp, keyjmp);
+	PATCH_JUMP(p, pzeroassocjump2, zeroassocjump2);
+	PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+	PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+	return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_gcm_decap - AES-GCM decap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ *		Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with
+ *		OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_decap(uint32_t *descbuf, bool ps, bool swap,
+		      struct alginfo *cipherdata,
+		      uint32_t ivlen, uint32_t icvsize)
+{
+	struct program prg;
+	struct program *p = &prg;
+
+	LABEL(keyjmp);
+	LABEL(zeroassocjump1);
+	LABEL(zeropayloadjump);
+	REFERENCE(pkeyjmp);
+	REFERENCE(pzeroassocjump1);
+	REFERENCE(pzeropayloadjump);
+
+	PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+	if (swap)
+		PROGRAM_SET_BSWAP(p);
+	if (ps)
+		PROGRAM_SET_36BIT_ADDR(p);
+
+	SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+	pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+	/* Insert Key */
+	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+	    cipherdata->keylen, INLINE_KEY(cipherdata));
+
+	SET_LABEL(p, keyjmp);
+
+	/* class 1 operation */
+	ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+		      OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+
+	MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+	SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+	/* if assoclen is ZERO, skip reading the assoc data */
+	MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+	pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+	MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+	/* skip assoc data */
+	SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+	/* read assoc data */
+	SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+
+	SET_LABEL(p, zeroassocjump1);
+
+	/* cryptlen = seqoutlen - assoclen */
+	MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+	/* jump to zero-payload command if cryptlen is zero */
+	pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+				MATH_Z);
+
+	MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQOUTSZ, 4, 0);
+
+	/* store encrypted data */
+	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+	/* read payload data */
+	SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | FLUSH1);
+
+	/* zero-payload command */
+	SET_LABEL(p, zeropayloadjump);
+
+	/* read ICV */
+	SEQFIFOLOAD(p, ICV1, icvsize, CLASS1 | LAST1);
+
+	PATCH_JUMP(p, pkeyjmp, keyjmp);
+	PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+	PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+	return PROGRAM_FINALIZE(p);
+}
+
+/**
  * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
  * @descbuf: pointer to descriptor-under-construction buffer
  * @swap: must be true when core endianness doesn't match SEC endianness
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH v2 4/5] crypto/dpaa2_sec: add support for AES-GCM and CTR
  2017-06-30  7:43 ` [PATCH v2 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
                     ` (2 preceding siblings ...)
  2017-06-30  7:43   ` [PATCH v2 3/5] crypto/dpaa2_sec: add hw desc support for AES-GCM akhil.goyal
@ 2017-06-30  7:43   ` akhil.goyal
  2017-06-30  7:43   ` [PATCH v2 5/5] test/test: add test cases for gcm and ctr in dpaa2_sec test suite akhil.goyal
  2017-07-03 12:31   ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update Akhil Goyal
  5 siblings, 0 replies; 21+ messages in thread
From: akhil.goyal @ 2017-06-30  7:43 UTC (permalink / raw)
  To: dev; +Cc: hemant.agrawal, pablo.de.lara.guarch, declan.doherty, Akhil Goyal

From: Akhil Goyal <akhil.goyal@nxp.com>

AES-GCM support is added through the AEAD type of crypto
operations.
Support for AES-CTR is also added.
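
For illustration only (not taken from this patch), an application selects
AES-GCM through the AEAD transform type roughly as below. The field names
are the ones dpaa2_sec_aead_init() reads further down, and the sizes stay
within the capability range the patch advertises (16..32 byte keys,
8..16 byte digests, 12 byte IV); keeping the IV in the crypto op private
data via IV_OFFSET is an assumption borrowed from the test application.

#include <stdint.h>
#include <rte_crypto.h>

static uint8_t gcm_key[16];	/* 128-bit key; 24/32 bytes are also valid */

/* IV carried in the crypto op private data, right after the sym op. */
#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
			 sizeof(struct rte_crypto_sym_op))

static struct rte_crypto_sym_xform gcm_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.next = NULL,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = gcm_key, .length = sizeof(gcm_key) },
		.iv = { .offset = IV_OFFSET, .length = 12 },
		.digest_length = 16,
		.add_auth_data_length = 16,	/* AAD in op->sym->aead.aad */
	},
};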

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 doc/guides/cryptodevs/dpaa2_sec.rst          |   9 +-
 doc/guides/cryptodevs/features/dpaa2_sec.ini |   8 +-
 doc/guides/rel_notes/release_17_08.rst       |   4 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 316 +++++++++++++++++++++++++--
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    |  98 ++++++---
 5 files changed, 379 insertions(+), 56 deletions(-)

diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index becb910..1444a91 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -126,7 +126,7 @@ fits in the DPAA2 Bus model
 Features
 --------
 
-The DPAA2 PMD has support for:
+The DPAA2_SEC PMD has support for:
 
 Cipher algorithms:
 
@@ -134,6 +134,9 @@ Cipher algorithms:
 * ``RTE_CRYPTO_CIPHER_AES128_CBC``
 * ``RTE_CRYPTO_CIPHER_AES192_CBC``
 * ``RTE_CRYPTO_CIPHER_AES256_CBC``
+* ``RTE_CRYPTO_CIPHER_AES128_CTR``
+* ``RTE_CRYPTO_CIPHER_AES192_CTR``
+* ``RTE_CRYPTO_CIPHER_AES256_CTR``
 
 Hash algorithms:
 
@@ -144,6 +147,10 @@ Hash algorithms:
 * ``RTE_CRYPTO_AUTH_SHA512_HMAC``
 * ``RTE_CRYPTO_AUTH_MD5_HMAC``
 
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
 Supported DPAA2 SoCs
 --------------------
 
diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
index db0ea4f..c3bb3dd 100644
--- a/doc/guides/cryptodevs/features/dpaa2_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -15,6 +15,9 @@ HW Accelerated         = Y
 AES CBC (128) = Y
 AES CBC (192) = Y
 AES CBC (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
 3DES CBC      = Y
 
 ;
@@ -29,6 +32,9 @@ SHA384 HMAC  = Y
 SHA512 HMAC  = Y
 
 ;
-; Supported AEAD algorithms of the 'openssl' crypto driver.
+; Supported AEAD algorithms of the 'dpaa2_sec' crypto driver.
 ;
 [AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/rel_notes/release_17_08.rst b/doc/guides/rel_notes/release_17_08.rst
index d29b203..c21e878 100644
--- a/doc/guides/rel_notes/release_17_08.rst
+++ b/doc/guides/rel_notes/release_17_08.rst
@@ -81,6 +81,10 @@ New Features
   necessary to use a combination of cipher and authentication
   structures anymore.
 
+* **Updated dpaa2_sec crypto PMD.**
+
+  Added support for AES-GCM and AES-CTR
+
 
 Resolved Issues
 ---------------
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index dbdaf46..b1eede9 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -69,10 +69,6 @@
 #define FSL_MC_DPSECI_DEVID     3
 
 #define NO_PREFETCH 0
-#define TDES_CBC_IV_LEN 8
-#define AES_CBC_IV_LEN 16
-#define AES_CTR_IV_LEN 16
-#define AES_GCM_IV_LEN 12
 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
 #define FLE_POOL_NUM_BUFS	32000
 #define FLE_POOL_BUF_SIZE	256
@@ -81,6 +77,149 @@
 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
 
 static inline int
+build_authenc_gcm_fd(dpaa2_sec_session *sess,
+		     struct rte_crypto_op *op,
+		     struct qbman_fd *fd, uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+	int icv_len = sess->digest_length, retval;
+	uint8_t *old_icv;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			sess->iv.offset);
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
+	 * Currently we donot know which FLE has the mbuf stored.
+	 * So while retreiving we can go back 1 FLE from the FD -ADDR
+	 * to get the MBUF Addr from the previous FLE.
+	 * We can have a better approach to use the inline Mbuf
+	 */
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		return -1;
+	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	fle = fle + 1;
+	sge = fle + 2;
+	if (likely(bpid < MAX_BPID)) {
+		DPAA2_SET_FD_BPID(fd, bpid);
+		DPAA2_SET_FLE_BPID(fle, bpid);
+		DPAA2_SET_FLE_BPID(fle + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge, bpid);
+		DPAA2_SET_FLE_BPID(sge + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge + 2, bpid);
+		DPAA2_SET_FLE_BPID(sge + 3, bpid);
+	} else {
+		DPAA2_SET_FD_IVP(fd);
+		DPAA2_SET_FLE_IVP(fle);
+		DPAA2_SET_FLE_IVP((fle + 1));
+		DPAA2_SET_FLE_IVP(sge);
+		DPAA2_SET_FLE_IVP((sge + 1));
+		DPAA2_SET_FLE_IVP((sge + 2));
+		DPAA2_SET_FLE_IVP((sge + 3));
+	}
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+		   "iv-len=%d data_off: 0x%x\n",
+		   sym_op->aead.data.offset,
+		   sym_op->aead.data.length,
+		   sym_op->aead.digest.length,
+		   sess->iv.length,
+		   sym_op->m_src->data_off);
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+	fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->aead.data.length + icv_len + auth_only_len) :
+			sym_op->aead.data.length + auth_only_len;
+
+	DPAA2_SET_FLE_SG_EXT(fle);
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+				sym_op->m_src->data_off - auth_only_len);
+	sge->length = sym_op->aead.data.length + auth_only_len;
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+		sge->length = sess->digest_length;
+		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+					sess->iv.length + auth_only_len));
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+	fle++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(fle);
+	DPAA2_SET_FLE_FIN(fle);
+	fle->length = (sess->dir == DIR_ENC) ?
+		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
+		 sess->digest_length);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+	sge->length = sess->iv.length;
+	sge++;
+	if (auth_only_len) {
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+		sge->length = auth_only_len;
+		DPAA2_SET_FLE_BPID(sge, bpid);
+		sge++;
+	}
+
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+				sym_op->m_src->data_off);
+	sge->length = sym_op->aead.data.length;
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,	sym_op->aead.digest.data,
+		       sess->digest_length);
+		memset(sym_op->aead.digest.data, 0, sess->digest_length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = sess->digest_length;
+		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+				 sess->digest_length +
+				 sess->iv.length +
+				 auth_only_len));
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+
+	return 0;
+}
+
+static inline int
 build_authenc_fd(dpaa2_sec_session *sess,
 		 struct rte_crypto_op *op,
 		 struct qbman_fd *fd, uint16_t bpid)
@@ -418,6 +557,9 @@ build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	case DPAA2_SEC_AUTH:
 		ret = build_auth_fd(sess, op, fd, bpid);
 		break;
+	case DPAA2_SEC_AEAD:
+		ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+		break;
 	case DPAA2_SEC_CIPHER_HASH:
 		ret = build_authenc_fd(sess, op, fd, bpid);
 		break;
@@ -773,7 +915,6 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		      struct rte_crypto_sym_xform *xform,
 		      dpaa2_sec_session *session)
 {
-	struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo cipherdata;
 	int bufsize, i;
@@ -820,15 +961,17 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
-		ctxt->iv.length = AES_CBC_IV_LEN;
 		break;
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
-		ctxt->iv.length = TDES_CBC_IV_LEN;
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cipherdata.algtype = OP_ALG_ALGSEL_AES;
+		cipherdata.algmode = OP_ALG_AAI_CTR;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+		break;
 	case RTE_CRYPTO_CIPHER_3DES_CTR:
 	case RTE_CRYPTO_CIPHER_AES_ECB:
 	case RTE_CRYPTO_CIPHER_3DES_ECB:
@@ -851,8 +994,8 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 				DIR_ENC : DIR_DEC;
 
 	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
-					&cipherdata, NULL, ctxt->iv.length,
-			session->dir);
+					&cipherdata, NULL, session->iv.length,
+					session->dir);
 	if (bufsize < 0) {
 		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
 		goto error_out;
@@ -887,7 +1030,6 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 		    struct rte_crypto_sym_xform *xform,
 		    dpaa2_sec_session *session)
 {
-	struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata;
 	unsigned int bufsize, i;
@@ -985,7 +1127,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 
 	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
 				   1, 0, &authdata, !session->dir,
-				   ctxt->trunc_len);
+				   session->digest_length);
 
 	flc->word1_sdl = (uint8_t)bufsize;
 	flc->word2_rflc_31_0 = lower_32_bits(
@@ -997,7 +1139,8 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 	session->ctxt = priv;
 	for (i = 0; i < bufsize; i++)
 		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
-			    i, priv->flc_desc[0].desc[i]);
+			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
 
 	return 0;
 
@@ -1014,6 +1157,126 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 {
 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+	struct alginfo aeaddata;
+	unsigned int bufsize, i;
+	struct ctxt_priv *priv;
+	struct sec_flow_context *flc;
+	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+	int err;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Set IV parameters */
+	session->iv.offset = aead_xform->iv.offset;
+	session->iv.length = aead_xform->iv.length;
+	session->ctxt_type = DPAA2_SEC_AEAD;
+
+	/* For SEC AEAD only one descriptor is required */
+	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+			RTE_CACHE_LINE_SIZE);
+	if (priv == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+		return -1;
+	}
+
+	priv->fle_pool = dev_priv->fle_pool;
+	flc = &priv->flc_desc[0].flc;
+
+	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+					       RTE_CACHE_LINE_SIZE);
+	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+		RTE_LOG(ERR, PMD, "No Memory for aead key");
+		rte_free(priv);
+		return -1;
+	}
+	memcpy(session->aead_key.data, aead_xform->key.data,
+	       aead_xform->key.length);
+
+	session->digest_length = aead_xform->digest_length;
+	session->aead_key.length = aead_xform->key.length;
+	ctxt->auth_only_len = aead_xform->add_auth_data_length;
+
+	aeaddata.key = (uint64_t)session->aead_key.data;
+	aeaddata.keylen = session->aead_key.length;
+	aeaddata.key_enc_flags = 0;
+	aeaddata.key_type = RTA_DATA_IMM;
+
+	switch (aead_xform->algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		aeaddata.algtype = OP_ALG_ALGSEL_AES;
+		aeaddata.algmode = OP_ALG_AAI_GCM;
+		session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
+		break;
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u",
+			aead_xform->algo);
+		goto error_out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
+			aead_xform->algo);
+		goto error_out;
+	}
+	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+				DIR_ENC : DIR_DEC;
+
+	priv->flc_desc[0].desc[0] = aeaddata.keylen;
+	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+			       MIN_JOB_DESC_SIZE,
+			       (unsigned int *)priv->flc_desc[0].desc,
+			       &priv->flc_desc[0].desc[1], 1);
+
+	if (err < 0) {
+		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
+		goto error_out;
+	}
+	if (priv->flc_desc[0].desc[1] & 1) {
+		aeaddata.key_type = RTA_DATA_IMM;
+	} else {
+		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
+		aeaddata.key_type = RTA_DATA_PTR;
+	}
+	priv->flc_desc[0].desc[0] = 0;
+	priv->flc_desc[0].desc[1] = 0;
+
+	if (session->dir == DIR_ENC)
+		bufsize = cnstr_shdsc_gcm_encap(
+				priv->flc_desc[0].desc, 1, 0,
+				&aeaddata, session->iv.length,
+				session->digest_length);
+	else
+		bufsize = cnstr_shdsc_gcm_decap(
+				priv->flc_desc[0].desc, 1, 0,
+				&aeaddata, session->iv.length,
+				session->digest_length);
+	flc->word1_sdl = (uint8_t)bufsize;
+	flc->word2_rflc_31_0 = lower_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	flc->word3_rflc_63_32 = upper_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	session->ctxt = priv;
+	for (i = 0; i < bufsize; i++)
+		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+			    i, priv->flc_desc[0].desc[i]);
+
+	return 0;
+
+error_out:
+	rte_free(session->aead_key.data);
+	rte_free(priv);
+	return -1;
+}
+
+
+static int
+dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
+		    struct rte_crypto_sym_xform *xform,
+		    dpaa2_sec_session *session)
+{
+	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata, cipherdata;
 	unsigned int bufsize, i;
 	struct ctxt_priv *priv;
@@ -1076,7 +1339,6 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 	memcpy(session->auth_key.data, auth_xform->key.data,
 	       auth_xform->key.length);
 
-	ctxt->trunc_len = auth_xform->digest_length;
 	authdata.key = (uint64_t)session->auth_key.data;
 	authdata.keylen = session->auth_key.length;
 	authdata.key_enc_flags = 0;
@@ -1147,19 +1409,21 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
-		ctxt->iv.length = AES_CBC_IV_LEN;
 		break;
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
-		ctxt->iv.length = TDES_CBC_IV_LEN;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cipherdata.algtype = OP_ALG_ALGSEL_AES;
+		cipherdata.algmode = OP_ALG_AAI_CTR;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
 		break;
 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
 	case RTE_CRYPTO_CIPHER_NULL:
 	case RTE_CRYPTO_CIPHER_3DES_ECB:
 	case RTE_CRYPTO_CIPHER_AES_ECB:
-	case RTE_CRYPTO_CIPHER_AES_CTR:
 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
 		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
 			cipher_xform->algo);
@@ -1202,9 +1466,9 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
 					      0, &cipherdata, &authdata,
-					      ctxt->iv.length,
+					      session->iv.length,
 					      ctxt->auth_only_len,
-					      ctxt->trunc_len,
+					      session->digest_length,
 					      session->dir);
 	} else {
 		RTE_LOG(ERR, PMD, "Hash before cipher not supported");
@@ -1221,8 +1485,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 	session->ctxt = priv;
 	for (i = 0; i < bufsize; i++)
 		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
-			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);
-
+			    i, priv->flc_desc[0].desc[i]);
 
 	return 0;
 
@@ -1264,13 +1527,19 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		session->ext_params.aead_ctxt.auth_cipher_text = true;
-		dpaa2_sec_aead_init(dev, xform, session);
+		dpaa2_sec_aead_chain_init(dev, xform, session);
 
 	/* Authenticate then Cipher */
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		session->ext_params.aead_ctxt.auth_cipher_text = false;
+		dpaa2_sec_aead_chain_init(dev, xform, session);
+
+	/* AEAD operation for AES-GCM kind of Algorithms */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+		   xform->next == NULL) {
 		dpaa2_sec_aead_init(dev, xform, session);
+
 	} else {
 		RTE_LOG(ERR, PMD, "Invalid crypto type");
 		return NULL;
@@ -1300,7 +1569,7 @@ dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
 {
 	PMD_INIT_FUNC_TRACE();
 
-	return -ENOTSUP;
+	return 0;
 }
 
 static int
@@ -1626,7 +1895,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 }
 
 static int
-cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
+cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
 			  struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_cryptodev *cryptodev;
@@ -1654,6 +1923,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
 
 	dpaa2_dev->cryptodev = cryptodev;
 	cryptodev->device = &dpaa2_dev->device;
+	cryptodev->device->driver = &dpaa2_drv->driver;
 
 	/* init user callbacks */
 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index b4dfe24..a477404 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -137,6 +137,7 @@ enum dpaa2_sec_op_type {
 	DPAA2_SEC_NONE,  /*!< No Cipher operations*/
 	DPAA2_SEC_CIPHER,/*!< CIPHER operations */
 	DPAA2_SEC_AUTH,  /*!< Authentication Operations */
+	DPAA2_SEC_AEAD,  /*!< AEAD (AES-GCM/CCM) type operations */
 	DPAA2_SEC_CIPHER_HASH,  /*!< Authenticated Encryption with
 				 * associated data
 				 */
@@ -149,30 +150,9 @@ enum dpaa2_sec_op_type {
 	DPAA2_SEC_MAX
 };
 
-struct dpaa2_sec_cipher_ctxt {
-	struct {
-		uint8_t *data;
-		uint16_t length;
-	} iv;	/**< Initialisation vector parameters */
-	uint8_t *init_counter;  /*!< Set initial counter for CTR mode */
-};
-
-struct dpaa2_sec_auth_ctxt {
-	uint8_t trunc_len;              /*!< Length for output ICV, should
-					 * be 0 if no truncation required
-					 */
-};
-
 struct dpaa2_sec_aead_ctxt {
-	struct {
-		uint8_t *data;
-		uint16_t length;
-	} iv;	/**< Initialisation vector parameters */
 	uint16_t auth_only_len; /*!< Length of data for Auth only */
 	uint8_t auth_cipher_text;       /**< Authenticate/cipher ordering */
-	uint8_t trunc_len;              /*!< Length for output ICV, should
-					 * be 0 if no truncation required
-					 */
 };
 
 typedef struct dpaa2_sec_session_entry {
@@ -181,14 +161,22 @@ typedef struct dpaa2_sec_session_entry {
 	uint8_t dir;         /*!< Operation Direction */
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
-	struct {
-		uint8_t *data;	/**< pointer to key data */
-		size_t length;	/**< key length in bytes */
-	} cipher_key;
-	struct {
-		uint8_t *data;	/**< pointer to key data */
-		size_t length;	/**< key length in bytes */
-	} auth_key;
+	union {
+		struct {
+			uint8_t *data;	/**< pointer to key data */
+			size_t length;	/**< key length in bytes */
+		} aead_key;
+		struct {
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} cipher_key;
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} auth_key;
+		};
+	};
 	struct {
 		uint16_t length; /**< IV length in bytes */
 		uint16_t offset; /**< IV offset in bytes */
@@ -196,8 +184,6 @@ typedef struct dpaa2_sec_session_entry {
 	uint16_t digest_length;
 	uint8_t status;
 	union {
-		struct dpaa2_sec_cipher_ctxt cipher_ctxt;
-		struct dpaa2_sec_auth_ctxt auth_ctxt;
 		struct dpaa2_sec_aead_ctxt aead_ctxt;
 	} ext_params;
 } dpaa2_sec_session;
@@ -335,6 +321,36 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
 	{	/* AES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -355,6 +371,26 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
 	{	/* 3DES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH v2 5/5] test/test: add test cases for gcm and ctr in dpaa2_sec test suite
  2017-06-30  7:43 ` [PATCH v2 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
                     ` (3 preceding siblings ...)
  2017-06-30  7:43   ` [PATCH v2 4/5] crypto/dpaa2_sec: add support for AES-GCM and CTR akhil.goyal
@ 2017-06-30  7:43   ` akhil.goyal
  2017-07-03 12:31   ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update Akhil Goyal
  5 siblings, 0 replies; 21+ messages in thread
From: akhil.goyal @ 2017-06-30  7:43 UTC (permalink / raw)
  To: dev; +Cc: hemant.agrawal, pablo.de.lara.guarch, declan.doherty, Akhil Goyal

From: Akhil Goyal <akhil.goyal@nxp.com>

dpaa2_sec test cases updated for the various supported
crypto algorithms.

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 test/test/test_cryptodev.c                   | 94 ++++++++++++++++++++++++----
 test/test/test_cryptodev_aes_test_vectors.h  | 78 +++++++++++++++--------
 test/test/test_cryptodev_blockcipher.c       |  1 +
 test/test/test_cryptodev_des_test_vectors.h  | 24 ++++---
 test/test/test_cryptodev_hash_test_vectors.h | 36 +++++++----
 5 files changed, 176 insertions(+), 57 deletions(-)

diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index db0999e..fe6c8dd 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -1738,6 +1738,22 @@ test_AES_cipheronly_dpaa2_sec_all(void)
 }
 
 static int
+test_authonly_dpaa2_sec_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool, ts_params->valid_devs[0],
+		RTE_CRYPTODEV_DPAA2_SEC_PMD,
+		BLKCIPHER_AUTHONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_authonly_openssl_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -8280,28 +8296,84 @@ static struct unit_test_suite cryptodev_dpaa2_sec_testsuite  = {
 	.teardown = testsuite_teardown,
 	.unit_test_cases = {
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_device_configure_invalid_dev_id),
+			test_device_configure_invalid_dev_id),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_multi_session),
+			test_multi_session),
 
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_AES_chain_dpaa2_sec_all),
+			test_AES_chain_dpaa2_sec_all),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_3DES_chain_dpaa2_sec_all),
+			test_3DES_chain_dpaa2_sec_all),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_AES_cipheronly_dpaa2_sec_all),
+			test_AES_cipheronly_dpaa2_sec_all),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_3DES_cipheronly_dpaa2_sec_all),
+			test_3DES_cipheronly_dpaa2_sec_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_authonly_dpaa2_sec_all),
 
-		/** HMAC_MD5 Authentication */
+		/** AES GCM Authenticated Encryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_7),
+
+		/** AES GCM Authenticated Decryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_2),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_generate_case_1),
+			test_mb_AES_GCM_authenticated_decryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_7),
+
+		/** AES GCM Authenticated Encryption 256 bits key */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_4),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_verify_case_1),
+			test_mb_AES_GCM_auth_encryption_test_case_256_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_7),
+
+		/** AES GCM Authenticated Decryption 256 bits key */
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_generate_case_2),
+			test_mb_AES_GCM_auth_decryption_test_case_256_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_2),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_verify_case_2),
+			test_mb_AES_GCM_auth_decryption_test_case_256_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_7),
 
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 07d6eab..f692d57 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1028,7 +1028,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1038,7 +1039,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1074,7 +1076,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1084,7 +1087,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1094,7 +1098,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1124,7 +1129,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1141,7 +1147,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
@@ -1159,7 +1166,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1175,7 +1183,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1205,7 +1214,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1262,7 +1272,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1272,7 +1283,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1281,7 +1293,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1291,7 +1304,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1322,7 +1336,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC Decryption",
@@ -1331,7 +1346,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption",
@@ -1340,7 +1356,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption Scater gather",
@@ -1357,7 +1374,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CBC Encryption",
@@ -1366,7 +1384,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CBC Decryption",
@@ -1375,7 +1394,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Encryption",
@@ -1400,7 +1420,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CTR Decryption",
@@ -1409,7 +1430,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CTR Encryption",
@@ -1418,7 +1440,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CTR Decryption",
@@ -1427,7 +1450,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CTR Encryption",
@@ -1436,7 +1460,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CTR Decryption",
@@ -1445,7 +1470,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 };
 
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index 446ab4f..85fad01 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -100,6 +100,7 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 			tdata->auth_key.len);
 
 	switch (cryptodev_type) {
+	case RTE_CRYPTODEV_DPAA2_SEC_PMD:
 	case RTE_CRYPTODEV_QAT_SYM_PMD:
 	case RTE_CRYPTODEV_OPENSSL_PMD:
 	case RTE_CRYPTODEV_ARMV8_PMD: /* Fall through */
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index b226794..0b6e0b8 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -1058,14 +1058,16 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
@@ -1084,14 +1086,16 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des192cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
 		.test_data = &triple_des192cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
@@ -1199,28 +1203,32 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.test_data = &triple_des128cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CBC Decryption",
 		.test_data = &triple_des128cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC Encryption",
 		.test_data = &triple_des192cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC Decryption",
 		.test_data = &triple_des192cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 3214f9a..24353fc 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -366,7 +366,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-MD5 Digest Verify",
@@ -374,7 +375,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA1 Digest",
@@ -394,7 +396,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Verify",
@@ -402,7 +405,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA224 Digest",
@@ -422,7 +426,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest Verify",
@@ -430,7 +435,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA256 Digest",
@@ -450,7 +456,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest Verify",
@@ -458,7 +465,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA384 Digest",
@@ -478,7 +486,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest Verify",
@@ -486,7 +495,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA512 Digest",
@@ -506,7 +516,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest Verify",
@@ -514,7 +525,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 };
 
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [PATCH 0/5] crypto/dpaa2_sec optimization and feature update
  2017-06-29 20:48 [PATCH 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
                   ` (4 preceding siblings ...)
  2017-06-30  7:43 ` [PATCH v2 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
@ 2017-07-02 23:43 ` De Lara Guarch, Pablo
  5 siblings, 0 replies; 21+ messages in thread
From: De Lara Guarch, Pablo @ 2017-07-02 23:43 UTC (permalink / raw)
  To: akhil.goyal, dev; +Cc: hemant.agrawal, Doherty, Declan



> -----Original Message-----
> From: akhil.goyal@nxp.com [mailto:akhil.goyal@nxp.com]
> Sent: Thursday, June 29, 2017 9:49 PM
> To: dev@dpdk.org
> Cc: hemant.agrawal@nxp.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Doherty, Declan
> <declan.doherty@intel.com>; Akhil Goyal <akhil.goyal@nxp.com>
> Subject: [PATCH 0/5] crypto/dpaa2_sec optimization and feature update
> 
> From: Akhil Goyal <akhil.goyal@nxp.com>
> 
> This patchset updates dpaa2_sec crypto driver with following:
> - optimization in data path for memory allocation
> - add support for additional AES algorithms like AES-GCM and AES-CTR
> - Update test cases in test_cryptodev for all the supported test cases.
> - Update documentation for supported algorithms
> 
> The patches are based on dpdk-crypto-next and are rebased over the latest
> crypto restructuring changes by Pablo.
> http://dpdk.org/ml/archives/dev/2017-June/069372.html
> 
> 
> Akhil Goyal (5):
>   crypto/dpaa2_sec: add per device mempool to store frame list entries
>   crypto/dpaa2_sec: add descriptor support for gcm and ctr
>   crypto/dpaa2_sec: add support for AES-GCM and CTR
>   test/test: add test cases for gcm and ctr in dpaa2_sec test suite
>   doc: update documentation for dpaa2_sec supported algos
> 
>  doc/guides/cryptodevs/dpaa2_sec.rst          |   9 +-
>  doc/guides/cryptodevs/features/dpaa2_sec.ini |   6 +
>  drivers/bus/fslmc/portal/dpaa2_hw_pvt.h      |   7 +
>  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 384
> ++++++++++++++++++++++++---
>  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    | 100 ++++---
>  drivers/crypto/dpaa2_sec/hw/desc/algo.h      | 226 +++++++++++++++-
>  drivers/crypto/dpaa2_sec/hw/desc/ipsec.h     |  19 +-
>  test/test/test_cryptodev.c                   |  94 ++++++-
>  test/test/test_cryptodev_aes_test_vectors.h  |  78 ++++--
>  test/test/test_cryptodev_blockcipher.c       |   1 +
>  test/test/test_cryptodev_des_test_vectors.h  |  24 +-
> test/test/test_cryptodev_hash_test_vectors.h |  36 ++-
>  12 files changed, 846 insertions(+), 138 deletions(-)
> 
> --
> 2.9.3

Hi Akhil,

There are some issues with check-git-log.sh on your patches.

Wrong headline format:
        test/test: add test cases for gcm and ctr in dpaa2_sec test suite
Wrong headline prefix:
        crypto/dpaa2_sec: add per device mempool to store frame list entries
Wrong headline lowercase:
        crypto/dpaa2_sec: add hw desc support for CTR
        crypto/dpaa2_sec: add hw desc support for AES-GCM
Headline too long:
        crypto/dpaa2_sec: add per device mempool to store frame list entries
        test/test: add test cases for gcm and ctr in dpaa2_sec test suite

For the first patch, change from "test/test: ...." to "test/crypto..." too.
Also, you can probably merge the last two patches, as you are adding existing tests to the dpaa2 testsuite,
for the changes made in patch 4.

Lastly, I submitted a v4 for the crypto rework, so make sure you rebase on top of this patchset
before sending a v2.

Thanks!
Pablo

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update
  2017-06-30  7:43 ` [PATCH v2 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
                     ` (4 preceding siblings ...)
  2017-06-30  7:43   ` [PATCH v2 5/5] test/test: add test cases for gcm and ctr in dpaa2_sec test suite akhil.goyal
@ 2017-07-03 12:31   ` Akhil Goyal
  2017-07-03 12:31     ` [PATCH v3 1/5] bus/fslmc: add macros to get/set fle context Akhil Goyal
                       ` (5 more replies)
  5 siblings, 6 replies; 21+ messages in thread
From: Akhil Goyal @ 2017-07-03 12:31 UTC (permalink / raw)
  To: dev
  Cc: hemant.agrawal, pablo.de.lara.guarch, shreyansh.jain,
	declan.doherty, Akhil Goyal

This patchset updates the dpaa2_sec crypto driver with the following:
- optimization in the data path for memory allocation
- add support for additional AES algorithms such as AES-GCM and AES-CTR
- update test cases in test_cryptodev for all the supported algorithms
- update documentation for the supported algorithms

The patches are based on dpdk-crypto-next and are rebased over the
latest crypto restructuring changes by Pablo.
http://dpdk.org/ml/archives/dev/2017-July/069743.html

Changes in v3:
- rebased over http://dpdk.org/ml/archives/dev/2017-July/069743.html
- fixed check-git-log errors
- merged the last two patches as per the suggestions from Pablo
- split the first patch to avoid a git-log error

Changes in v2:
- fixed typo in doc
- split 2nd patch in two
- squashed documentation patch into the 4th patch
- fixed checkpatch errors/warnings


Akhil Goyal (5):
  bus/fslmc: add macros to get/set fle context
  crypto/dpaa2_sec: add per dev mempool to store fle
  crypto/dpaa2_sec: add HW desc support for ctr
  crypto/dpaa2_sec: add HW desc support for aes-gcm
  crypto/dpaa2_sec: add support for aes-gcm and ctr

 doc/guides/cryptodevs/dpaa2_sec.rst          |   9 +-
 doc/guides/cryptodevs/features/dpaa2_sec.ini |   8 +-
 doc/guides/rel_notes/release_17_08.rst       |   4 +
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h      |   7 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 385 ++++++++++++++++++++++++---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    | 100 ++++---
 drivers/crypto/dpaa2_sec/hw/desc/algo.h      | 228 +++++++++++++++-
 drivers/crypto/dpaa2_sec/hw/desc/ipsec.h     |  19 +-
 test/test/test_cryptodev.c                   |  94 ++++++-
 test/test/test_cryptodev_aes_test_vectors.h  |  78 ++++--
 test/test/test_cryptodev_blockcipher.c       |   1 +
 test/test/test_cryptodev_des_test_vectors.h  |  24 +-
 test/test/test_cryptodev_hash_test_vectors.h |  36 ++-
 13 files changed, 854 insertions(+), 139 deletions(-)

-- 
2.9.3

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH v3 1/5] bus/fslmc: add macros to get/set fle context
  2017-07-03 12:31   ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update Akhil Goyal
@ 2017-07-03 12:31     ` Akhil Goyal
  2017-07-03 12:31     ` [PATCH v3 2/5] crypto/dpaa2_sec: add per dev mempool to store fle Akhil Goyal
                       ` (4 subsequent siblings)
  5 siblings, 0 replies; 21+ messages in thread
From: Akhil Goyal @ 2017-07-03 12:31 UTC (permalink / raw)
  To: dev
  Cc: hemant.agrawal, pablo.de.lara.guarch, shreyansh.jain,
	declan.doherty, Akhil Goyal

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 429eaee..16cadf5 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -182,6 +182,13 @@ struct qbman_fle {
 	fle->addr_lo = lower_32_bits((uint64_t)addr);     \
 	fle->addr_hi = upper_32_bits((uint64_t)addr);	  \
 } while (0)
+#define DPAA2_GET_FLE_CTXT(fle)					\
+	(uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \
+			(fle)->reserved[0])
+#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
+	fle->reserved[0] = lower_32_bits((uint64_t)addr);     \
+	fle->reserved[1] = upper_32_bits((uint64_t)addr);	  \
+} while (0)
 #define DPAA2_SET_FLE_OFFSET(fle, offset) \
 	((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
 #define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid)
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH v3 2/5] crypto/dpaa2_sec: add per dev mempool to store fle
  2017-07-03 12:31   ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update Akhil Goyal
  2017-07-03 12:31     ` [PATCH v3 1/5] bus/fslmc: add macros to get/set fle context Akhil Goyal
@ 2017-07-03 12:31     ` Akhil Goyal
  2017-07-03 12:31     ` [PATCH v3 3/5] crypto/dpaa2_sec: add HW desc support for ctr Akhil Goyal
                       ` (3 subsequent siblings)
  5 siblings, 0 replies; 21+ messages in thread
From: Akhil Goyal @ 2017-07-03 12:31 UTC (permalink / raw)
  To: dev
  Cc: hemant.agrawal, pablo.de.lara.guarch, shreyansh.jain,
	declan.doherty, Akhil Goyal

rte_malloc uses a common memory area for all cores.

The rte_malloc calls are now replaced by a per-device mempool to
allocate space for the FLEs. This removes contention and improves
performance.
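
For illustration only, the datapath change amounts to roughly the
sketch below (not part of the diff; the helper name fle_pool_example
and the literal sizes are placeholders mirroring the FLE_POOL_*
defines added in this patch):

    #include <string.h>
    #include <rte_memory.h>
    #include <rte_mempool.h>

    static struct rte_mempool *fle_pool; /* one pool per cryptodev */

    static int fle_pool_example(void)
    {
            void *buf;

            /* Created once at device init, shared by all lcores that
             * enqueue to this device.
             */
            fle_pool = rte_mempool_create("fle_pool_0", 32000, 256,
                                          512, 0, NULL, NULL, NULL,
                                          NULL, SOCKET_ID_ANY, 0);
            if (fle_pool == NULL)
                    return -1;

            /* Per crypto op: replaces rte_zmalloc()/rte_free(). */
            if (rte_mempool_get(fle_pool, &buf))
                    return -1;
            memset(buf, 0, 256);
            /* ... build the frame list entries here ... */
            rte_mempool_put(fle_pool, buf);
            return 0;
    }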

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 79 ++++++++++++++++++++++-------
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  2 +
 2 files changed, 63 insertions(+), 18 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 3620751..dbdaf46 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -71,6 +71,13 @@
 #define NO_PREFETCH 0
 #define TDES_CBC_IV_LEN 8
 #define AES_CBC_IV_LEN 16
+#define AES_CTR_IV_LEN 16
+#define AES_GCM_IV_LEN 12
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS	32000
+#define FLE_POOL_BUF_SIZE	256
+#define FLE_POOL_CACHE_SIZE	512
+
 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
 
 static inline int
@@ -84,9 +91,8 @@ build_authenc_fd(dpaa2_sec_session *sess,
 	struct sec_flow_context *flc;
 	uint32_t auth_only_len = sym_op->auth.data.length -
 				sym_op->cipher.data.length;
-	int icv_len = sess->digest_length;
+	int icv_len = sess->digest_length, retval;
 	uint8_t *old_icv;
-	uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
 			sess->iv.offset);
 
@@ -98,12 +104,14 @@ build_authenc_fd(dpaa2_sec_session *sess,
 	 * to get the MBUF Addr from the previous FLE.
 	 * We can have a better approach to use the inline Mbuf
 	 */
-	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
-	if (!fle) {
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
 		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
 		return -1;
 	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
 	fle = fle + 1;
 	sge = fle + 2;
 	if (likely(bpid < MAX_BPID)) {
@@ -214,21 +222,19 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 {
 	struct rte_crypto_sym_op *sym_op = op->sym;
 	struct qbman_fle *fle, *sge;
-	uint32_t mem_len = (sess->dir == DIR_ENC) ?
-			   (3 * sizeof(struct qbman_fle)) :
-			   (5 * sizeof(struct qbman_fle) +
-			    sess->digest_length);
 	struct sec_flow_context *flc;
 	struct ctxt_priv *priv = sess->ctxt;
 	uint8_t *old_digest;
+	int retval;
 
 	PMD_INIT_FUNC_TRACE();
 
-	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
-	if (!fle) {
-		RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
 		return -1;
 	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
 	/* TODO we are using the first FLE entry to store Mbuf.
 	 * Currently we donot know which FLE has the mbuf stored.
 	 * So while retreiving we can go back 1 FLE from the FD -ADDR
@@ -236,6 +242,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	 * We can have a better approach to use the inline Mbuf
 	 */
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
 	fle = fle + 1;
 
 	if (likely(bpid < MAX_BPID)) {
@@ -306,7 +313,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 {
 	struct rte_crypto_sym_op *sym_op = op->sym;
 	struct qbman_fle *fle, *sge;
-	uint32_t mem_len = (5 * sizeof(struct qbman_fle));
+	int retval;
 	struct sec_flow_context *flc;
 	struct ctxt_priv *priv = sess->ctxt;
 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
@@ -314,12 +321,12 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 
 	PMD_INIT_FUNC_TRACE();
 
-	/* todo - we can use some mempool to avoid malloc here */
-	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
-	if (!fle) {
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
 		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
 		return -1;
 	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
 	/* TODO we are using the first FLE entry to store Mbuf.
 	 * Currently we donot know which FLE has the mbuf stored.
 	 * So while retreiving we can go back 1 FLE from the FD -ADDR
@@ -327,6 +334,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	 * We can have a better approach to use the inline Mbuf
 	 */
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
 	fle = fle + 1;
 	sge = fle + 2;
 
@@ -499,6 +507,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 {
 	struct qbman_fle *fle;
 	struct rte_crypto_op *op;
+	struct ctxt_priv *priv;
 
 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
 
@@ -534,7 +543,8 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 		   DPAA2_GET_FD_LEN(fd));
 
 	/* free the fle memory */
-	rte_free(fle - 1);
+	priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+	rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
 
 	return op;
 }
@@ -764,6 +774,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		      dpaa2_sec_session *session)
 {
 	struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo cipherdata;
 	int bufsize, i;
 	struct ctxt_priv *priv;
@@ -780,6 +791,8 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		return -1;
 	}
 
+	priv->fle_pool = dev_priv->fle_pool;
+
 	flc = &priv->flc_desc[0].flc;
 
 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
@@ -875,8 +888,9 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 		    dpaa2_sec_session *session)
 {
 	struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata;
-	unsigned int bufsize;
+	unsigned int bufsize, i;
 	struct ctxt_priv *priv;
 	struct sec_flow_context *flc;
 
@@ -892,6 +906,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 		return -1;
 	}
 
+	priv->fle_pool = dev_priv->fle_pool;
 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
 
 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
@@ -980,6 +995,9 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 			(uint64_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	session->ctxt = priv;
+	for (i = 0; i < bufsize; i++)
+		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+			    i, priv->flc_desc[0].desc[i]);
 
 	return 0;
 
@@ -995,8 +1013,9 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 		    dpaa2_sec_session *session)
 {
 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata, cipherdata;
-	unsigned int bufsize;
+	unsigned int bufsize, i;
 	struct ctxt_priv *priv;
 	struct sec_flow_context *flc;
 	struct rte_crypto_cipher_xform *cipher_xform;
@@ -1032,6 +1051,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 		return -1;
 	}
 
+	priv->fle_pool = dev_priv->fle_pool;
 	flc = &priv->flc_desc[0].flc;
 
 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
@@ -1199,6 +1219,10 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 			(uint64_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	session->ctxt = priv;
+	for (i = 0; i < bufsize; i++)
+		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
 
 	return 0;
 
@@ -1496,6 +1520,10 @@ static struct rte_cryptodev_ops crypto_ops = {
 static int
 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
 {
+	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+	rte_mempool_free(internals->fle_pool);
+
 	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
 		     dev->data->name, rte_socket_id());
 
@@ -1512,6 +1540,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 	uint16_t token;
 	struct dpseci_attr attr;
 	int retcode, hw_id;
+	char str[20];
 
 	PMD_INIT_FUNC_TRACE();
 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
@@ -1572,6 +1601,20 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 	internals->hw = dpseci;
 	internals->token = token;
 
+	sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
+	internals->fle_pool = rte_mempool_create((const char *)str,
+			FLE_POOL_NUM_BUFS,
+			FLE_POOL_BUF_SIZE,
+			FLE_POOL_CACHE_SIZE, 0,
+			NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
+	if (!internals->fle_pool) {
+		RTE_LOG(ERR, PMD, "%s create failed", str);
+		goto init_error;
+	} else
+		RTE_LOG(INFO, PMD, "%s created: %p\n", str,
+				internals->fle_pool);
+
 	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
 	return 0;
 
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index eda2eec..b4dfe24 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -40,6 +40,7 @@
 struct dpaa2_sec_dev_private {
 	void *mc_portal; /**< MC Portal for configuring this device */
 	void *hw; /**< Hardware handle for this device.Used by NADK framework */
+	struct rte_mempool *fle_pool; /* per device memory pool for FLE */
 	int32_t hw_id; /**< An unique ID of this device instance */
 	int32_t vfio_fd; /**< File descriptor received via VFIO */
 	uint16_t token; /**< Token required by DPxxx objects */
@@ -128,6 +129,7 @@ struct sec_flc_desc {
 };
 
 struct ctxt_priv {
+	struct rte_mempool *fle_pool; /* per device memory pool for FLE */
 	struct sec_flc_desc flc_desc[0];
 };
 
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH v3 3/5] crypto/dpaa2_sec: add HW desc support for ctr
  2017-07-03 12:31   ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update Akhil Goyal
  2017-07-03 12:31     ` [PATCH v3 1/5] bus/fslmc: add macros to get/set fle context Akhil Goyal
  2017-07-03 12:31     ` [PATCH v3 2/5] crypto/dpaa2_sec: add per dev mempool to store fle Akhil Goyal
@ 2017-07-03 12:31     ` Akhil Goyal
  2017-07-03 12:31     ` [PATCH v3 4/5] crypto/dpaa2_sec: add HW desc support for aes-gcm Akhil Goyal
                       ` (2 subsequent siblings)
  5 siblings, 0 replies; 21+ messages in thread
From: Akhil Goyal @ 2017-07-03 12:31 UTC (permalink / raw)
  To: dev
  Cc: hemant.agrawal, pablo.de.lara.guarch, shreyansh.jain,
	declan.doherty, Akhil Goyal

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa2_sec/hw/desc/algo.h  | 23 ++++++++++++++++-------
 drivers/crypto/dpaa2_sec/hw/desc/ipsec.h | 19 +++++++++++++------
 2 files changed, 29 insertions(+), 13 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
index c71ada0..166bc3a 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/algo.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -159,6 +159,10 @@ cnstr_shdsc_snow_f9(uint32_t *descbuf, bool ps, bool swap,
  * @ps: if 36/40bit addressing is desired, this parameter must be true
  * @swap: must be true when core endianness doesn't match SEC endianness
  * @cipherdata: pointer to block cipher transform definitions
+ *              Valid algorithm values one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ *              Valid modes for:
+ *                  AES: OP_ALG_AAI_* {CBC, CTR}
+ *                  DES, 3DES: OP_ALG_AAI_CBC
  * @iv: IV data; if NULL, "ivlen" bytes from the input frame will be read as IV
  * @ivlen: IV length
  * @dir: DIR_ENC/DIR_DEC
@@ -172,8 +176,10 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
 {
 	struct program prg;
 	struct program *p = &prg;
-	const bool is_aes_dec = (dir == DIR_DEC) &&
-				(cipherdata->algtype == OP_ALG_ALGSEL_AES);
+	uint32_t iv_off = 0;
+	const bool need_dk = (dir == DIR_DEC) &&
+			     (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+			     (cipherdata->algmode == OP_ALG_AAI_CBC);
 	LABEL(keyjmp);
 	LABEL(skipdk);
 	REFERENCE(pkeyjmp);
@@ -191,7 +197,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-	if (is_aes_dec) {
+	if (need_dk) {
 		ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
 			      OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
 
@@ -199,7 +205,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
 	}
 	SET_LABEL(p, keyjmp);
 
-	if (is_aes_dec) {
+	if (need_dk) {
 		ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
 			      OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
 			      ICV_CHECK_DISABLE, dir);
@@ -209,12 +215,15 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
 			      OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
 	}
 
+	if (cipherdata->algmode == OP_ALG_AAI_CTR)
+		iv_off = 16;
+
 	if (iv)
 		/* IV load, convert size */
-		LOAD(p, (uintptr_t)iv, CONTEXT1, 0, ivlen, IMMED | COPY);
+		LOAD(p, (uintptr_t)iv, CONTEXT1, iv_off, ivlen, IMMED | COPY);
 	else
 		/* IV is present first before the actual message */
-		SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+		SEQLOAD(p, CONTEXT1, iv_off, ivlen, 0);
 
 	MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
 	MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
@@ -224,7 +233,7 @@ cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
 	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
 	PATCH_JUMP(p, pkeyjmp, keyjmp);
-	if (is_aes_dec)
+	if (need_dk)
 		PATCH_JUMP(p, pskipdk, skipdk);
 
 	return PROGRAM_FINALIZE(p);
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
index c63d0da..5954055 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -1311,8 +1311,11 @@ cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
  * @descbuf: pointer to buffer used for descriptor construction
  * @ps: if 36/40bit addressing is desired, this parameter must be true
  * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
- * @cipherdata: ointer to block cipher transform definitions.
+ * @cipherdata: pointer to block cipher transform definitions.
  *              Valid algorithm values one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ *              Valid modes for:
+ *                  AES: OP_ALG_AAI_* {CBC, CTR}
+ *                  DES, 3DES: OP_ALG_AAI_CBC
  * @authdata: pointer to authentication transform definitions.
  *            Valid algorithm values - one of OP_ALG_ALGSEL_* {MD5, SHA1,
  *            SHA224, SHA256, SHA384, SHA512}
@@ -1379,8 +1382,9 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
 {
 	struct program prg;
 	struct program *p = &prg;
-	const bool is_aes_dec = (dir == DIR_DEC) &&
-				(cipherdata->algtype == OP_ALG_ALGSEL_AES);
+	const bool need_dk = (dir == DIR_DEC) &&
+			     (cipherdata->algtype == OP_ALG_ALGSEL_AES) &&
+			     (cipherdata->algmode == OP_ALG_AAI_CBC);
 
 	LABEL(skip_patch_len);
 	LABEL(keyjmp);
@@ -1466,7 +1470,7 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
 		      dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 		      dir);
 
-	if (is_aes_dec)
+	if (need_dk)
 		ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode,
 			      OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
 	pskipkeys = JUMP(p, skipkeys, LOCAL_JUMP, ALL_TRUE, 0);
@@ -1478,7 +1482,7 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
 		      dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 		      dir);
 
-	if (is_aes_dec) {
+	if (need_dk) {
 		ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
 			      OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
 			      ICV_CHECK_DISABLE, dir);
@@ -1503,7 +1507,10 @@ cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
 	SET_LABEL(p, aonly_len_offset);
 
 	/* Read IV */
-	SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+	if (cipherdata->algmode == OP_ALG_AAI_CTR)
+		SEQLOAD(p, CONTEXT1, 16, ivlen, 0);
+	else
+		SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
 
 	/*
 	 * Read data needed only for authentication. This is overwritten above
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH v3 4/5] crypto/dpaa2_sec: add HW desc support for aes-gcm
  2017-07-03 12:31   ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update Akhil Goyal
                       ` (2 preceding siblings ...)
  2017-07-03 12:31     ` [PATCH v3 3/5] crypto/dpaa2_sec: add HW desc support for ctr Akhil Goyal
@ 2017-07-03 12:31     ` Akhil Goyal
  2017-07-03 12:31     ` [PATCH v3 5/5] crypto/dpaa2_sec: add support for aes-gcm and ctr Akhil Goyal
  2017-07-03 18:55     ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update De Lara Guarch, Pablo
  5 siblings, 0 replies; 21+ messages in thread
From: Akhil Goyal @ 2017-07-03 12:31 UTC (permalink / raw)
  To: dev
  Cc: hemant.agrawal, pablo.de.lara.guarch, shreyansh.jain,
	declan.doherty, Akhil Goyal

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa2_sec/hw/desc/algo.h | 205 ++++++++++++++++++++++++++++++++
 1 file changed, 205 insertions(+)

diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
index 166bc3a..cb633ed 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/algo.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -443,6 +443,211 @@ cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
 }
 
 /**
+ * cnstr_shdsc_gcm_encap - AES-GCM encap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ *		Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with
+ *		OP_ALG_AAI_GCM.
+ * @ivlen: Initialization vector length
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_encap(uint32_t *descbuf, bool ps, bool swap,
+		      struct alginfo *cipherdata,
+		      uint32_t ivlen, uint32_t icvsize)
+{
+	struct program prg;
+	struct program *p = &prg;
+
+	LABEL(keyjmp);
+	LABEL(zeroassocjump2);
+	LABEL(zeroassocjump1);
+	LABEL(zeropayloadjump);
+	REFERENCE(pkeyjmp);
+	REFERENCE(pzeroassocjump2);
+	REFERENCE(pzeroassocjump1);
+	REFERENCE(pzeropayloadjump);
+
+	PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+	if (swap)
+		PROGRAM_SET_BSWAP(p);
+	if (ps)
+		PROGRAM_SET_36BIT_ADDR(p);
+
+	SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+	pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+	/* Insert Key */
+	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+	    cipherdata->keylen, INLINE_KEY(cipherdata));
+
+	SET_LABEL(p, keyjmp);
+
+	/* class 1 operation */
+	ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+		      OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+
+	MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+
+	/* if assoclen + cryptlen is ZERO, skip to ICV write */
+	MATHB(p, SEQINSZ, SUB, ivlen, VSEQOUTSZ, 4, IMMED2);
+	pzeroassocjump2 = JUMP(p, zeroassocjump2, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+	SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+	/* if assoclen is ZERO, skip reading the assoc data */
+	MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+	pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+	MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+	/* skip assoc data */
+	SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+	/* cryptlen = seqinlen - assoclen */
+	MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+	/* if cryptlen is ZERO jump to zero-payload commands */
+	pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+				MATH_Z);
+
+	/* read assoc data */
+	SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+	SET_LABEL(p, zeroassocjump1);
+
+	MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+	/* write encrypted data */
+	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+	/* read payload data */
+	SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | LAST1);
+
+	/* jump the zero-payload commands */
+	JUMP(p, 4, LOCAL_JUMP, ALL_TRUE, 0);
+
+	/* zero-payload commands */
+	SET_LABEL(p, zeropayloadjump);
+
+	/* read assoc data */
+	SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | LAST1);
+
+	JUMP(p, 2, LOCAL_JUMP, ALL_TRUE, 0);
+
+	/* There is no input data */
+	SET_LABEL(p, zeroassocjump2);
+
+	SEQFIFOLOAD(p, IV1, ivlen, FLUSH1 | LAST1);
+
+	/* write ICV */
+	SEQSTORE(p, CONTEXT1, 0, icvsize, 0);
+
+	PATCH_JUMP(p, pkeyjmp, keyjmp);
+	PATCH_JUMP(p, pzeroassocjump2, zeroassocjump2);
+	PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+	PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+	return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_gcm_decap - AES-GCM decap as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ *		Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with
+ *		OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_gcm_decap(uint32_t *descbuf, bool ps, bool swap,
+		      struct alginfo *cipherdata,
+		      uint32_t ivlen, uint32_t icvsize)
+{
+	struct program prg;
+	struct program *p = &prg;
+
+	LABEL(keyjmp);
+	LABEL(zeroassocjump1);
+	LABEL(zeropayloadjump);
+	REFERENCE(pkeyjmp);
+	REFERENCE(pzeroassocjump1);
+	REFERENCE(pzeropayloadjump);
+
+	PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+	if (swap)
+		PROGRAM_SET_BSWAP(p);
+	if (ps)
+		PROGRAM_SET_36BIT_ADDR(p);
+
+	SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+	pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SELF | SHRD);
+	/* Insert Key */
+	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+	    cipherdata->keylen, INLINE_KEY(cipherdata));
+
+	SET_LABEL(p, keyjmp);
+
+	/* class 1 operation */
+	ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+		      OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+
+	MATHB(p, DPOVRD, AND, 0x7fffffff, MATH3, 4, IMMED2);
+	SEQFIFOLOAD(p, IV1, ivlen, FLUSH1);
+
+	/* if assoclen is ZERO, skip reading the assoc data */
+	MATHB(p, ZERO, ADD, MATH3, VSEQINSZ, 4, 0);
+	pzeroassocjump1 = JUMP(p, zeroassocjump1, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+	MATHB(p, ZERO, ADD, MATH3, VSEQOUTSZ, 4, 0);
+
+	/* skip assoc data */
+	SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+
+	/* read assoc data */
+	SEQFIFOLOAD(p, AAD1, 0, CLASS1 | VLF | FLUSH1);
+
+	SET_LABEL(p, zeroassocjump1);
+
+	/* cryptlen = seqoutlen - assoclen */
+	MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+	/* jump to zero-payload command if cryptlen is zero */
+	pzeropayloadjump = JUMP(p, zeropayloadjump, LOCAL_JUMP, ALL_TRUE,
+				MATH_Z);
+
+	MATHB(p, SEQOUTSZ, SUB, MATH0, VSEQOUTSZ, 4, 0);
+
+	/* store encrypted data */
+	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+	/* read payload data */
+	SEQFIFOLOAD(p, MSG1, 0, CLASS1 | VLF | FLUSH1);
+
+	/* zero-payload command */
+	SET_LABEL(p, zeropayloadjump);
+
+	/* read ICV */
+	SEQFIFOLOAD(p, ICV1, icvsize, CLASS1 | LAST1);
+
+	PATCH_JUMP(p, pkeyjmp, keyjmp);
+	PATCH_JUMP(p, pzeroassocjump1, zeroassocjump1);
+	PATCH_JUMP(p, pzeropayloadjump, zeropayloadjump);
+
+	return PROGRAM_FINALIZE(p);
+}
+
+/**
  * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
  * @descbuf: pointer to descriptor-under-construction buffer
  * @swap: must be true when core endianness doesn't match SEC endianness
-- 
2.9.3
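
For reference, here is a minimal caller-side sketch (not part of the patch) of how
the cnstr_shdsc_gcm_encap() constructor added above might be invoked; the helper
name, the descriptor buffer, the key buffer and the chosen IV/ICV lengths are
illustrative assumptions, and the sketch assumes the RTA headers (hw/desc/algo.h)
are included. cnstr_shdsc_gcm_decap() is used the same way for the decrypt
direction, and the actual driver-side usage appears in patch 5/5 of this series.

static int build_gcm_encap_desc(uint32_t *descbuf, uint8_t *key_data,
				uint32_t keylen)
{
	/* Describe the AES-GCM key for the descriptor constructor */
	struct alginfo aeaddata = {
		.algtype       = OP_ALG_ALGSEL_AES,
		.algmode       = OP_ALG_AAI_GCM,
		.key           = (uint64_t)key_data,
		.keylen        = keylen,
		.key_enc_flags = 0,
		.key_type      = RTA_DATA_IMM,
	};

	/* ps=1 (36/40-bit addressing), swap=0, 12-byte IV, 16-byte ICV;
	 * returns the descriptor length in words, or a negative error.
	 */
	return cnstr_shdsc_gcm_encap(descbuf, 1, 0, &aeaddata, 12, 16);
}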

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH v3 5/5] crypto/dpaa2_sec: add support for aes-gcm and ctr
  2017-07-03 12:31   ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update Akhil Goyal
                       ` (3 preceding siblings ...)
  2017-07-03 12:31     ` [PATCH v3 4/5] crypto/dpaa2_sec: add HW desc support for aes-gcm Akhil Goyal
@ 2017-07-03 12:31     ` Akhil Goyal
  2017-07-03 18:55     ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update De Lara Guarch, Pablo
  5 siblings, 0 replies; 21+ messages in thread
From: Akhil Goyal @ 2017-07-03 12:31 UTC (permalink / raw)
  To: dev
  Cc: hemant.agrawal, pablo.de.lara.guarch, shreyansh.jain,
	declan.doherty, Akhil Goyal

AES-GCM support is added as an AEAD type of crypto
operation. Support for AES-CTR is also added.

test/crypto and documentation are also updated for
dpaa2_sec to reflect the supported algorithms.
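
For context only, a minimal sketch (not part of this patch) of the single AEAD
transform an application could hand to this PMD for AES-GCM is shown below;
'key', 'IV_OFFSET' and 'aad_len' are placeholder assumptions and the snippet is
meant to live inside an application's session-setup code.

/* Standalone AES-GCM AEAD transform; placeholder values, see above */
struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.next = NULL,                   /* no cipher/auth chaining */
	.aead = {
		.op    = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo  = RTE_CRYPTO_AEAD_AES_GCM,
		.key   = { .data = key, .length = 16 },     /* AES-128 */
		.iv    = { .offset = IV_OFFSET, .length = 12 },
		.digest_length = 16,            /* full 16-byte GCM tag */
		.add_auth_data_length = aad_len,
	},
};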

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 doc/guides/cryptodevs/dpaa2_sec.rst          |   9 +-
 doc/guides/cryptodevs/features/dpaa2_sec.ini |   8 +-
 doc/guides/rel_notes/release_17_08.rst       |   4 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 316 +++++++++++++++++++++++++--
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    |  98 ++++++---
 test/test/test_cryptodev.c                   |  94 +++++++-
 test/test/test_cryptodev_aes_test_vectors.h  |  78 ++++---
 test/test/test_cryptodev_blockcipher.c       |   1 +
 test/test/test_cryptodev_des_test_vectors.h  |  24 +-
 test/test/test_cryptodev_hash_test_vectors.h |  36 ++-
 10 files changed, 555 insertions(+), 113 deletions(-)

diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index becb910..1444a91 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -126,7 +126,7 @@ fits in the DPAA2 Bus model
 Features
 --------
 
-The DPAA2 PMD has support for:
+The DPAA2_SEC PMD has support for:
 
 Cipher algorithms:
 
@@ -134,6 +134,9 @@ Cipher algorithms:
 * ``RTE_CRYPTO_CIPHER_AES128_CBC``
 * ``RTE_CRYPTO_CIPHER_AES192_CBC``
 * ``RTE_CRYPTO_CIPHER_AES256_CBC``
+* ``RTE_CRYPTO_CIPHER_AES128_CTR``
+* ``RTE_CRYPTO_CIPHER_AES192_CTR``
+* ``RTE_CRYPTO_CIPHER_AES256_CTR``
 
 Hash algorithms:
 
@@ -144,6 +147,10 @@ Hash algorithms:
 * ``RTE_CRYPTO_AUTH_SHA512_HMAC``
 * ``RTE_CRYPTO_AUTH_MD5_HMAC``
 
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
 Supported DPAA2 SoCs
 --------------------
 
diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
index db0ea4f..c3bb3dd 100644
--- a/doc/guides/cryptodevs/features/dpaa2_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -15,6 +15,9 @@ HW Accelerated         = Y
 AES CBC (128) = Y
 AES CBC (192) = Y
 AES CBC (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
 3DES CBC      = Y
 
 ;
@@ -29,6 +32,9 @@ SHA384 HMAC  = Y
 SHA512 HMAC  = Y
 
 ;
-; Supported AEAD algorithms of the 'openssl' crypto driver.
+; Supported AEAD algorithms of the 'dpaa2_sec' crypto driver.
 ;
 [AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/rel_notes/release_17_08.rst b/doc/guides/rel_notes/release_17_08.rst
index d29b203..c21e878 100644
--- a/doc/guides/rel_notes/release_17_08.rst
+++ b/doc/guides/rel_notes/release_17_08.rst
@@ -81,6 +81,10 @@ New Features
   necessary to use a combination of cipher and authentication
   structures anymore.
 
+* **Updated dpaa2_sec crypto PMD.**
+
+  Added support for AES-GCM and AES-CTR
+
 
 Resolved Issues
 ---------------
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index dbdaf46..b1eede9 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -69,10 +69,6 @@
 #define FSL_MC_DPSECI_DEVID     3
 
 #define NO_PREFETCH 0
-#define TDES_CBC_IV_LEN 8
-#define AES_CBC_IV_LEN 16
-#define AES_CTR_IV_LEN 16
-#define AES_GCM_IV_LEN 12
 /* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
 #define FLE_POOL_NUM_BUFS	32000
 #define FLE_POOL_BUF_SIZE	256
@@ -81,6 +77,149 @@
 enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
 
 static inline int
+build_authenc_gcm_fd(dpaa2_sec_session *sess,
+		     struct rte_crypto_op *op,
+		     struct qbman_fd *fd, uint16_t bpid)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct ctxt_priv *priv = sess->ctxt;
+	struct qbman_fle *fle, *sge;
+	struct sec_flow_context *flc;
+	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+	int icv_len = sess->digest_length, retval;
+	uint8_t *old_icv;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			sess->iv.offset);
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
+	 * Currently we do not know which FLE has the mbuf stored.
+	 * So while retrieving we can go back 1 FLE from the FD-ADDR
+	 * to get the MBUF Addr from the previous FLE.
+	 * We can have a better approach to use the inline Mbuf
+	 */
+	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+	if (retval) {
+		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+		return -1;
+	}
+	memset(fle, 0, FLE_POOL_BUF_SIZE);
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	fle = fle + 1;
+	sge = fle + 2;
+	if (likely(bpid < MAX_BPID)) {
+		DPAA2_SET_FD_BPID(fd, bpid);
+		DPAA2_SET_FLE_BPID(fle, bpid);
+		DPAA2_SET_FLE_BPID(fle + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge, bpid);
+		DPAA2_SET_FLE_BPID(sge + 1, bpid);
+		DPAA2_SET_FLE_BPID(sge + 2, bpid);
+		DPAA2_SET_FLE_BPID(sge + 3, bpid);
+	} else {
+		DPAA2_SET_FD_IVP(fd);
+		DPAA2_SET_FLE_IVP(fle);
+		DPAA2_SET_FLE_IVP((fle + 1));
+		DPAA2_SET_FLE_IVP(sge);
+		DPAA2_SET_FLE_IVP((sge + 1));
+		DPAA2_SET_FLE_IVP((sge + 2));
+		DPAA2_SET_FLE_IVP((sge + 3));
+	}
+
+	/* Save the shared descriptor */
+	flc = &priv->flc_desc[0].flc;
+	/* Configure FD as a FRAME LIST */
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+		   "iv-len=%d data_off: 0x%x\n",
+		   sym_op->aead.data.offset,
+		   sym_op->aead.data.length,
+		   sym_op->aead.digest.length,
+		   sess->iv.length,
+		   sym_op->m_src->data_off);
+
+	/* Configure Output FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+	if (auth_only_len)
+		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+	fle->length = (sess->dir == DIR_ENC) ?
+			(sym_op->aead.data.length + icv_len + auth_only_len) :
+			sym_op->aead.data.length + auth_only_len;
+
+	DPAA2_SET_FLE_SG_EXT(fle);
+
+	/* Configure Output SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+				sym_op->m_src->data_off - auth_only_len);
+	sge->length = sym_op->aead.data.length + auth_only_len;
+
+	if (sess->dir == DIR_ENC) {
+		sge++;
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+		sge->length = sess->digest_length;
+		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+					sess->iv.length + auth_only_len));
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	sge++;
+	fle++;
+
+	/* Configure Input FLE with Scatter/Gather Entry */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+	DPAA2_SET_FLE_SG_EXT(fle);
+	DPAA2_SET_FLE_FIN(fle);
+	fle->length = (sess->dir == DIR_ENC) ?
+		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
+		 sess->digest_length);
+
+	/* Configure Input SGE for Encap/Decap */
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+	sge->length = sess->iv.length;
+	sge++;
+	if (auth_only_len) {
+		DPAA2_SET_FLE_ADDR(sge,
+				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+		sge->length = auth_only_len;
+		DPAA2_SET_FLE_BPID(sge, bpid);
+		sge++;
+	}
+
+	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+				sym_op->m_src->data_off);
+	sge->length = sym_op->aead.data.length;
+	if (sess->dir == DIR_DEC) {
+		sge++;
+		old_icv = (uint8_t *)(sge + 1);
+		memcpy(old_icv,	sym_op->aead.digest.data,
+		       sess->digest_length);
+		memset(sym_op->aead.digest.data, 0, sess->digest_length);
+		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+		sge->length = sess->digest_length;
+		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+				 sess->digest_length +
+				 sess->iv.length +
+				 auth_only_len));
+	}
+	DPAA2_SET_FLE_FIN(sge);
+
+	if (auth_only_len) {
+		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+	}
+
+	return 0;
+}
+
+static inline int
 build_authenc_fd(dpaa2_sec_session *sess,
 		 struct rte_crypto_op *op,
 		 struct qbman_fd *fd, uint16_t bpid)
@@ -418,6 +557,9 @@ build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	case DPAA2_SEC_AUTH:
 		ret = build_auth_fd(sess, op, fd, bpid);
 		break;
+	case DPAA2_SEC_AEAD:
+		ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+		break;
 	case DPAA2_SEC_CIPHER_HASH:
 		ret = build_authenc_fd(sess, op, fd, bpid);
 		break;
@@ -773,7 +915,6 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		      struct rte_crypto_sym_xform *xform,
 		      dpaa2_sec_session *session)
 {
-	struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo cipherdata;
 	int bufsize, i;
@@ -820,15 +961,17 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
-		ctxt->iv.length = AES_CBC_IV_LEN;
 		break;
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
-		ctxt->iv.length = TDES_CBC_IV_LEN;
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cipherdata.algtype = OP_ALG_ALGSEL_AES;
+		cipherdata.algmode = OP_ALG_AAI_CTR;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+		break;
 	case RTE_CRYPTO_CIPHER_3DES_CTR:
 	case RTE_CRYPTO_CIPHER_AES_ECB:
 	case RTE_CRYPTO_CIPHER_3DES_ECB:
@@ -851,8 +994,8 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 				DIR_ENC : DIR_DEC;
 
 	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
-					&cipherdata, NULL, ctxt->iv.length,
-			session->dir);
+					&cipherdata, NULL, session->iv.length,
+					session->dir);
 	if (bufsize < 0) {
 		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
 		goto error_out;
@@ -887,7 +1030,6 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 		    struct rte_crypto_sym_xform *xform,
 		    dpaa2_sec_session *session)
 {
-	struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata;
 	unsigned int bufsize, i;
@@ -985,7 +1127,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 
 	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
 				   1, 0, &authdata, !session->dir,
-				   ctxt->trunc_len);
+				   session->digest_length);
 
 	flc->word1_sdl = (uint8_t)bufsize;
 	flc->word2_rflc_31_0 = lower_32_bits(
@@ -997,7 +1139,8 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 	session->ctxt = priv;
 	for (i = 0; i < bufsize; i++)
 		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
-			    i, priv->flc_desc[0].desc[i]);
+			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
 
 	return 0;
 
@@ -1014,6 +1157,126 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 {
 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+	struct alginfo aeaddata;
+	unsigned int bufsize, i;
+	struct ctxt_priv *priv;
+	struct sec_flow_context *flc;
+	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+	int err;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Set IV parameters */
+	session->iv.offset = aead_xform->iv.offset;
+	session->iv.length = aead_xform->iv.length;
+	session->ctxt_type = DPAA2_SEC_AEAD;
+
+	/* For SEC AEAD only one descriptor is required */
+	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+			RTE_CACHE_LINE_SIZE);
+	if (priv == NULL) {
+		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+		return -1;
+	}
+
+	priv->fle_pool = dev_priv->fle_pool;
+	flc = &priv->flc_desc[0].flc;
+
+	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+					       RTE_CACHE_LINE_SIZE);
+	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+		RTE_LOG(ERR, PMD, "No Memory for aead key");
+		rte_free(priv);
+		return -1;
+	}
+	memcpy(session->aead_key.data, aead_xform->key.data,
+	       aead_xform->key.length);
+
+	session->digest_length = aead_xform->digest_length;
+	session->aead_key.length = aead_xform->key.length;
+	ctxt->auth_only_len = aead_xform->add_auth_data_length;
+
+	aeaddata.key = (uint64_t)session->aead_key.data;
+	aeaddata.keylen = session->aead_key.length;
+	aeaddata.key_enc_flags = 0;
+	aeaddata.key_type = RTA_DATA_IMM;
+
+	switch (aead_xform->algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		aeaddata.algtype = OP_ALG_ALGSEL_AES;
+		aeaddata.algmode = OP_ALG_AAI_GCM;
+		session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
+		break;
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u",
+			aead_xform->algo);
+		goto error_out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
+			aead_xform->algo);
+		goto error_out;
+	}
+	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+				DIR_ENC : DIR_DEC;
+
+	priv->flc_desc[0].desc[0] = aeaddata.keylen;
+	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+			       MIN_JOB_DESC_SIZE,
+			       (unsigned int *)priv->flc_desc[0].desc,
+			       &priv->flc_desc[0].desc[1], 1);
+
+	if (err < 0) {
+		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
+		goto error_out;
+	}
+	if (priv->flc_desc[0].desc[1] & 1) {
+		aeaddata.key_type = RTA_DATA_IMM;
+	} else {
+		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
+		aeaddata.key_type = RTA_DATA_PTR;
+	}
+	priv->flc_desc[0].desc[0] = 0;
+	priv->flc_desc[0].desc[1] = 0;
+
+	if (session->dir == DIR_ENC)
+		bufsize = cnstr_shdsc_gcm_encap(
+				priv->flc_desc[0].desc, 1, 0,
+				&aeaddata, session->iv.length,
+				session->digest_length);
+	else
+		bufsize = cnstr_shdsc_gcm_decap(
+				priv->flc_desc[0].desc, 1, 0,
+				&aeaddata, session->iv.length,
+				session->digest_length);
+	flc->word1_sdl = (uint8_t)bufsize;
+	flc->word2_rflc_31_0 = lower_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	flc->word3_rflc_63_32 = upper_32_bits(
+			(uint64_t)&(((struct dpaa2_sec_qp *)
+			dev->data->queue_pairs[0])->rx_vq));
+	session->ctxt = priv;
+	for (i = 0; i < bufsize; i++)
+		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+			    i, priv->flc_desc[0].desc[i]);
+
+	return 0;
+
+error_out:
+	rte_free(session->aead_key.data);
+	rte_free(priv);
+	return -1;
+}
+
+
+static int
+dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
+		    struct rte_crypto_sym_xform *xform,
+		    dpaa2_sec_session *session)
+{
+	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
 	struct alginfo authdata, cipherdata;
 	unsigned int bufsize, i;
 	struct ctxt_priv *priv;
@@ -1076,7 +1339,6 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 	memcpy(session->auth_key.data, auth_xform->key.data,
 	       auth_xform->key.length);
 
-	ctxt->trunc_len = auth_xform->digest_length;
 	authdata.key = (uint64_t)session->auth_key.data;
 	authdata.keylen = session->auth_key.length;
 	authdata.key_enc_flags = 0;
@@ -1147,19 +1409,21 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
-		ctxt->iv.length = AES_CBC_IV_LEN;
 		break;
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
 		cipherdata.algmode = OP_ALG_AAI_CBC;
 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
-		ctxt->iv.length = TDES_CBC_IV_LEN;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cipherdata.algtype = OP_ALG_ALGSEL_AES;
+		cipherdata.algmode = OP_ALG_AAI_CTR;
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
 		break;
 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
 	case RTE_CRYPTO_CIPHER_NULL:
 	case RTE_CRYPTO_CIPHER_3DES_ECB:
 	case RTE_CRYPTO_CIPHER_AES_ECB:
-	case RTE_CRYPTO_CIPHER_AES_CTR:
 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
 		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
 			cipher_xform->algo);
@@ -1202,9 +1466,9 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
 					      0, &cipherdata, &authdata,
-					      ctxt->iv.length,
+					      session->iv.length,
 					      ctxt->auth_only_len,
-					      ctxt->trunc_len,
+					      session->digest_length,
 					      session->dir);
 	} else {
 		RTE_LOG(ERR, PMD, "Hash before cipher not supported");
@@ -1221,8 +1485,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 	session->ctxt = priv;
 	for (i = 0; i < bufsize; i++)
 		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
-			    i, priv->flc_desc[DESC_INITFINAL].desc[i]);
-
+			    i, priv->flc_desc[0].desc[i]);
 
 	return 0;
 
@@ -1264,13 +1527,19 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		session->ext_params.aead_ctxt.auth_cipher_text = true;
-		dpaa2_sec_aead_init(dev, xform, session);
+		dpaa2_sec_aead_chain_init(dev, xform, session);
 
 	/* Authenticate then Cipher */
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		session->ext_params.aead_ctxt.auth_cipher_text = false;
+		dpaa2_sec_aead_chain_init(dev, xform, session);
+
+	/* AEAD operation for AES-GCM kind of Algorithms */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+		   xform->next == NULL) {
 		dpaa2_sec_aead_init(dev, xform, session);
+
 	} else {
 		RTE_LOG(ERR, PMD, "Invalid crypto type");
 		return NULL;
@@ -1300,7 +1569,7 @@ dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
 {
 	PMD_INIT_FUNC_TRACE();
 
-	return -ENOTSUP;
+	return 0;
 }
 
 static int
@@ -1626,7 +1895,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 }
 
 static int
-cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
+cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
 			  struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_cryptodev *cryptodev;
@@ -1654,6 +1923,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
 
 	dpaa2_dev->cryptodev = cryptodev;
 	cryptodev->device = &dpaa2_dev->device;
+	cryptodev->device->driver = &dpaa2_drv->driver;
 
 	/* init user callbacks */
 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index b4dfe24..a477404 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -137,6 +137,7 @@ enum dpaa2_sec_op_type {
 	DPAA2_SEC_NONE,  /*!< No Cipher operations*/
 	DPAA2_SEC_CIPHER,/*!< CIPHER operations */
 	DPAA2_SEC_AUTH,  /*!< Authentication Operations */
+	DPAA2_SEC_AEAD,  /*!< AEAD (AES-GCM/CCM) type operations */
 	DPAA2_SEC_CIPHER_HASH,  /*!< Authenticated Encryption with
 				 * associated data
 				 */
@@ -149,30 +150,9 @@ enum dpaa2_sec_op_type {
 	DPAA2_SEC_MAX
 };
 
-struct dpaa2_sec_cipher_ctxt {
-	struct {
-		uint8_t *data;
-		uint16_t length;
-	} iv;	/**< Initialisation vector parameters */
-	uint8_t *init_counter;  /*!< Set initial counter for CTR mode */
-};
-
-struct dpaa2_sec_auth_ctxt {
-	uint8_t trunc_len;              /*!< Length for output ICV, should
-					 * be 0 if no truncation required
-					 */
-};
-
 struct dpaa2_sec_aead_ctxt {
-	struct {
-		uint8_t *data;
-		uint16_t length;
-	} iv;	/**< Initialisation vector parameters */
 	uint16_t auth_only_len; /*!< Length of data for Auth only */
 	uint8_t auth_cipher_text;       /**< Authenticate/cipher ordering */
-	uint8_t trunc_len;              /*!< Length for output ICV, should
-					 * be 0 if no truncation required
-					 */
 };
 
 typedef struct dpaa2_sec_session_entry {
@@ -181,14 +161,22 @@ typedef struct dpaa2_sec_session_entry {
 	uint8_t dir;         /*!< Operation Direction */
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
-	struct {
-		uint8_t *data;	/**< pointer to key data */
-		size_t length;	/**< key length in bytes */
-	} cipher_key;
-	struct {
-		uint8_t *data;	/**< pointer to key data */
-		size_t length;	/**< key length in bytes */
-	} auth_key;
+	union {
+		struct {
+			uint8_t *data;	/**< pointer to key data */
+			size_t length;	/**< key length in bytes */
+		} aead_key;
+		struct {
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} cipher_key;
+			struct {
+				uint8_t *data;	/**< pointer to key data */
+				size_t length;	/**< key length in bytes */
+			} auth_key;
+		};
+	};
 	struct {
 		uint16_t length; /**< IV length in bytes */
 		uint16_t offset; /**< IV offset in bytes */
@@ -196,8 +184,6 @@ typedef struct dpaa2_sec_session_entry {
 	uint16_t digest_length;
 	uint8_t status;
 	union {
-		struct dpaa2_sec_cipher_ctxt cipher_ctxt;
-		struct dpaa2_sec_auth_ctxt auth_ctxt;
 		struct dpaa2_sec_aead_ctxt aead_ctxt;
 	} ext_params;
 } dpaa2_sec_session;
@@ -335,6 +321,36 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
 	{	/* AES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -355,6 +371,26 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
 	{	/* 3DES CBC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index db0999e..fe6c8dd 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -1738,6 +1738,22 @@ test_AES_cipheronly_dpaa2_sec_all(void)
 }
 
 static int
+test_authonly_dpaa2_sec_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool, ts_params->valid_devs[0],
+		RTE_CRYPTODEV_DPAA2_SEC_PMD,
+		BLKCIPHER_AUTHONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_authonly_openssl_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -8280,28 +8296,84 @@ static struct unit_test_suite cryptodev_dpaa2_sec_testsuite  = {
 	.teardown = testsuite_teardown,
 	.unit_test_cases = {
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_device_configure_invalid_dev_id),
+			test_device_configure_invalid_dev_id),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_multi_session),
+			test_multi_session),
 
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_AES_chain_dpaa2_sec_all),
+			test_AES_chain_dpaa2_sec_all),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_3DES_chain_dpaa2_sec_all),
+			test_3DES_chain_dpaa2_sec_all),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_AES_cipheronly_dpaa2_sec_all),
+			test_AES_cipheronly_dpaa2_sec_all),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_3DES_cipheronly_dpaa2_sec_all),
+			test_3DES_cipheronly_dpaa2_sec_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_authonly_dpaa2_sec_all),
 
-		/** HMAC_MD5 Authentication */
+		/** AES GCM Authenticated Encryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_encryption_test_case_7),
+
+		/** AES GCM Authenticated Decryption */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_2),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_generate_case_1),
+			test_mb_AES_GCM_authenticated_decryption_test_case_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_authenticated_decryption_test_case_7),
+
+		/** AES GCM Authenticated Encryption 256 bits key */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_2),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_4),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_verify_case_1),
+			test_mb_AES_GCM_auth_encryption_test_case_256_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_encryption_test_case_256_7),
+
+		/** AES GCM Authenticated Decryption 256 bits key */
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_generate_case_2),
+			test_mb_AES_GCM_auth_decryption_test_case_256_1),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_2),
 		TEST_CASE_ST(ut_setup, ut_teardown,
-			     test_MD5_HMAC_verify_case_2),
+			test_mb_AES_GCM_auth_decryption_test_case_256_3),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_5),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_6),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_mb_AES_GCM_auth_decryption_test_case_256_7),
 
 		TEST_CASES_END() /**< NULL terminate unit test array */
 	}
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 07d6eab..f692d57 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1028,7 +1028,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1038,7 +1039,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1074,7 +1076,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1084,7 +1087,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1094,7 +1098,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1124,7 +1129,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1141,7 +1147,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
@@ -1159,7 +1166,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1175,7 +1183,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1205,7 +1214,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1262,7 +1272,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1272,7 +1283,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1281,7 +1293,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1291,7 +1304,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1322,7 +1336,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CBC Decryption",
@@ -1331,7 +1346,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption",
@@ -1340,7 +1356,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption Scater gather",
@@ -1357,7 +1374,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CBC Encryption",
@@ -1366,7 +1384,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CBC Decryption",
@@ -1375,7 +1394,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Encryption",
@@ -1400,7 +1420,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-128-CTR Decryption",
@@ -1409,7 +1430,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CTR Encryption",
@@ -1418,7 +1440,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-192-CTR Decryption",
@@ -1427,7 +1450,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CTR Encryption",
@@ -1436,7 +1460,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "AES-256-CTR Decryption",
@@ -1445,7 +1470,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 };
 
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index 446ab4f..85fad01 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -100,6 +100,7 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 			tdata->auth_key.len);
 
 	switch (cryptodev_type) {
+	case RTE_CRYPTODEV_DPAA2_SEC_PMD:
 	case RTE_CRYPTODEV_QAT_SYM_PMD:
 	case RTE_CRYPTODEV_OPENSSL_PMD:
 	case RTE_CRYPTODEV_ARMV8_PMD: /* Fall through */
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index b226794..0b6e0b8 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -1058,14 +1058,16 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
@@ -1084,14 +1086,16 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des192cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
 		.test_data = &triple_des192cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
@@ -1199,28 +1203,32 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.test_data = &triple_des128cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CBC Decryption",
 		.test_data = &triple_des128cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC Encryption",
 		.test_data = &triple_des192cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-192-CBC Decryption",
 		.test_data = &triple_des192cbc_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 3214f9a..24353fc 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -366,7 +366,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-MD5 Digest Verify",
@@ -374,7 +375,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA1 Digest",
@@ -394,7 +396,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Verify",
@@ -402,7 +405,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA224 Digest",
@@ -422,7 +426,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest Verify",
@@ -430,7 +435,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA256 Digest",
@@ -450,7 +456,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest Verify",
@@ -458,7 +465,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA384 Digest",
@@ -478,7 +486,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest Verify",
@@ -486,7 +495,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "SHA512 Digest",
@@ -506,7 +516,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest Verify",
@@ -514,7 +525,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC
 	},
 };
 
-- 
2.9.3

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update
  2017-07-03 12:31   ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update Akhil Goyal
                       ` (4 preceding siblings ...)
  2017-07-03 12:31     ` [PATCH v3 5/5] crypto/dpaa2_sec: add support for aes-gcm and ctr Akhil Goyal
@ 2017-07-03 18:55     ` De Lara Guarch, Pablo
  5 siblings, 0 replies; 21+ messages in thread
From: De Lara Guarch, Pablo @ 2017-07-03 18:55 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: hemant.agrawal, shreyansh.jain, Doherty, Declan



> -----Original Message-----
> From: Akhil Goyal [mailto:akhil.goyal@nxp.com]
> Sent: Monday, July 3, 2017 1:32 PM
> To: dev@dpdk.org
> Cc: hemant.agrawal@nxp.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; shreyansh.jain@nxp.com; Doherty,
> Declan <declan.doherty@intel.com>; Akhil Goyal <akhil.goyal@nxp.com>
> Subject: [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update
> 
> This patchset updates dpaa2_sec crypto driver with following:
> - optimization in data path for memory allocation
> - add support for additional AES algorithms like AES-GCM and AES-CTR
> - Update test cases in test_cryptodev for all the supported test cases.
> - Update documentation for supported algorithms
> 
> The patches are based on dpdk-crypto-next and are rebased over the latest
> crypto restructuring changes by Pablo.
> http://dpdk.org/ml/archives/dev/2017-July/069743.html
> 
> changes in v3:
> -rebased over http://dpdk.org/ml/archives/dev/2017-July/069743.html
> -fixed check-git-log errors.
> -merged last two patches as per the suggestions from Pablo
> -split first patch to avoid git-log error.
> 
> changes in v2:
> -fixed typo in doc
> -split 2nd patch in two
> -squashed documentation patch in 4th patch
> -fixed checkpatch errors/warnings
> 
> 
> Akhil Goyal (5):
>   bus/fslmc: add macros to get/set fle context
>   crypto/dpaa2_sec: add per dev mempool to store fle
>   crypto/dpaa2_sec: add HW desc support for ctr
>   crypto/dpaa2_sec: add HW desc support for aes-gcm
>   crypto/dpaa2_sec: add support for aes-gcm and ctr
> 
>  doc/guides/cryptodevs/dpaa2_sec.rst          |   9 +-
>  doc/guides/cryptodevs/features/dpaa2_sec.ini |   8 +-
>  doc/guides/rel_notes/release_17_08.rst       |   4 +
>  drivers/bus/fslmc/portal/dpaa2_hw_pvt.h      |   7 +
>  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 385 ++++++++++++++++++++++++---
>  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    | 100 ++++---
>  drivers/crypto/dpaa2_sec/hw/desc/algo.h      | 228 +++++++++++++++-
>  drivers/crypto/dpaa2_sec/hw/desc/ipsec.h     |  19 +-
>  test/test/test_cryptodev.c                   |  94 ++++++-
>  test/test/test_cryptodev_aes_test_vectors.h  |  78 ++++--
>  test/test/test_cryptodev_blockcipher.c       |   1 +
>  test/test/test_cryptodev_des_test_vectors.h  |  24 +-
>  test/test/test_cryptodev_hash_test_vectors.h |  36 ++-
>  13 files changed, 854 insertions(+), 139 deletions(-)
> 
> --
> 2.9.3

Applied to dpdk-next-crypto.
Thanks,

Pablo

^ permalink raw reply	[flat|nested] 21+ messages in thread

end of thread, other threads:[~2017-07-03 18:55 UTC | newest]

Thread overview: 21+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-06-29 20:48 [PATCH 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
2017-06-29 20:49 ` [PATCH 1/5] crypto/dpaa2_sec: add per device mempool to store frame list entries akhil.goyal
2017-06-29 20:49 ` [PATCH 3/5] crypto/dpaa2_sec: add support for AES-GCM and CTR akhil.goyal
2017-06-29 20:49 ` [PATCH 4/5] test/test: add test cases for gcm and ctr in dpaa2_sec test suite akhil.goyal
2017-06-29 20:49 ` [PATCH 5/5] doc: update documentation for dpaa2_sec supported algos akhil.goyal
2017-06-29 21:07   ` De Lara Guarch, Pablo
2017-06-29 21:54     ` Akhil Goyal
2017-06-30  7:43 ` [PATCH v2 0/5] crypto/dpaa2_sec optimization and feature update akhil.goyal
2017-06-30  7:43   ` [PATCH v2 1/5] crypto/dpaa2_sec: add per device mempool to store frame list entries akhil.goyal
2017-06-30  7:43   ` [PATCH v2 2/5] crypto/dpaa2_sec: add hw desc support for CTR akhil.goyal
2017-06-30  7:43   ` [PATCH v2 3/5] crypto/dpaa2_sec: add hw desc support for AES-GCM akhil.goyal
2017-06-30  7:43   ` [PATCH v2 4/5] crypto/dpaa2_sec: add support for AES-GCM and CTR akhil.goyal
2017-06-30  7:43   ` [PATCH v2 5/5] test/test: add test cases for gcm and ctr in dpaa2_sec test suite akhil.goyal
2017-07-03 12:31   ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update Akhil Goyal
2017-07-03 12:31     ` [PATCH v3 1/5] bus/fslmc: add macros to get/set fle context Akhil Goyal
2017-07-03 12:31     ` [PATCH v3 2/5] crypto/dpaa2_sec: add per dev mempool to store fle Akhil Goyal
2017-07-03 12:31     ` [PATCH v3 3/5] crypto/dpaa2_sec: add HW desc support for ctr Akhil Goyal
2017-07-03 12:31     ` [PATCH v3 4/5] crypto/dpaa2_sec: add HW desc support for aes-gcm Akhil Goyal
2017-07-03 12:31     ` [PATCH v3 5/5] crypto/dpaa2_sec: add support for aes-gcm and ctr Akhil Goyal
2017-07-03 18:55     ` [PATCH v3 0/5] crypto/dpaa2_sec optimization and feature update De Lara Guarch, Pablo
2017-07-02 23:43 ` [PATCH " De Lara Guarch, Pablo
