* [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD
@ 2017-11-30 13:12 Ravi Kumar
  2017-11-30 13:12 ` [PATCH 02/11] crypto/ccp: add " Ravi Kumar
                   ` (11 more replies)
  0 siblings, 12 replies; 131+ messages in thread
From: Ravi Kumar @ 2017-11-30 13:12 UTC (permalink / raw)
  To: dev

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 MAINTAINERS             | 6 ++++++
 config/common_base      | 5 +++++
 drivers/crypto/Makefile | 1 +
 mk/rte.app.mk           | 2 ++
 4 files changed, 14 insertions(+)
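
The PMD is disabled by default; a minimal sketch of what is needed to
build and link it (assuming the make-based build and an OpenSSL
development package installed, since the PMD links against -lcrypto):

    CONFIG_RTE_LIBRTE_PMD_CCP=y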

diff --git a/MAINTAINERS b/MAINTAINERS
index f0baeb4..daac82e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -588,6 +588,12 @@ M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 F: doc/guides/cryptodevs/features/default.ini
 
+AMD CCP Crypto PMD
+M: Ravi Kumar <ravi1.kumar@amd.com>
+F: drivers/crypto/ccp/
+F: doc/guides/cryptodevs/ccp.rst
+F: doc/guides/cryptodevs/features/ccp.ini
+
 ARMv8 Crypto
 M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
 F: drivers/crypto/armv8/
diff --git a/config/common_base b/config/common_base
index e74febe..88826c8 100644
--- a/config/common_base
+++ b/config/common_base
@@ -557,6 +557,11 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
 CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 
 #
+# Compile PMD for AMD CCP crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_CCP=n
+
+#
 # Compile PMD for Marvell Crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 645b696..b378643 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -44,5 +44,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 6a6a745..0453b7f 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -191,6 +191,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC)   += -lrte_bus_dpaa
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC)   += -lrte_pmd_dpaa_sec
 endif # CONFIG_RTE_LIBRTE_DPAA_BUS
 
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CCP)   += -lrte_pmd_ccp -lcrypto
+
 endif # CONFIG_RTE_LIBRTE_CRYPTODEV
 
 ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
-- 
2.7.4


* [PATCH 02/11] crypto/ccp: add support for AMD CCP crypto PMD
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
@ 2017-11-30 13:12 ` Ravi Kumar
  2017-12-11 19:50   ` De Lara Guarch, Pablo
  2017-11-30 13:12 ` [PATCH 03/11] crypto: add macros for AES-CMAC and SHA3 Ravi Kumar
                   ` (10 subsequent siblings)
  11 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2017-11-30 13:12 UTC (permalink / raw)
  To: dev

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/Makefile                |   69 ++
 drivers/crypto/ccp/ccp_crypto.c            | 1776 ++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h            |  364 ++++++
 drivers/crypto/ccp/ccp_dev.c               |  846 +++++++++++++
 drivers/crypto/ccp/ccp_dev.h               |  530 +++++++++
 drivers/crypto/ccp/ccp_pci.c               |  328 +++++
 drivers/crypto/ccp/ccp_pci.h               |   55 +
 drivers/crypto/ccp/ccp_pmd_ops.c           |  645 ++++++++++
 drivers/crypto/ccp/ccp_pmd_private.h       |  128 ++
 drivers/crypto/ccp/rte_ccp_pmd.c           |  281 +++++
 drivers/crypto/ccp/rte_pmd_ccp_version.map |    4 +
 11 files changed, 5026 insertions(+)
 create mode 100644 drivers/crypto/ccp/Makefile
 create mode 100644 drivers/crypto/ccp/ccp_crypto.c
 create mode 100644 drivers/crypto/ccp/ccp_crypto.h
 create mode 100644 drivers/crypto/ccp/ccp_dev.c
 create mode 100644 drivers/crypto/ccp/ccp_dev.h
 create mode 100644 drivers/crypto/ccp/ccp_pci.c
 create mode 100644 drivers/crypto/ccp/ccp_pci.h
 create mode 100644 drivers/crypto/ccp/ccp_pmd_ops.c
 create mode 100644 drivers/crypto/ccp/ccp_pmd_private.h
 create mode 100644 drivers/crypto/ccp/rte_ccp_pmd.c
 create mode 100644 drivers/crypto/ccp/rte_pmd_ccp_version.map

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 0000000..03ec1ec
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,69 @@
+#
+#   Copyright(c) 2017 Advanced Micro Devices, Inc.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+# 	* Redistributions of source code must retain the above copyright
+# 	notice, this list of conditions and the following disclaimer.
+# 	* Redistributions in binary form must reproduce the above copyright
+# 	notice, this list of conditions and the following disclaimer in the
+# 	documentation and/or other materials provided with the distribution.
+# 	* Neither the name of the copyright holder nor the names of its
+# 	contributors may be used to endorse or promote products derived from
+# 	this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library dependencies
+LDLIBS += -lcrypto
+LDLIBS += -lpthread
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 0000000..68e1169
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,1776 @@
+/*-
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * 	* Redistributions of source code must retain the above copyright
+ * 	notice, this list of conditions and the following disclaimer.
+ * 	* Redistributions in binary form must reproduce the above copyright
+ * 	notice, this list of conditions and the following disclaimer in the
+ * 	documentation and/or other materials provided with the distribution.
+ * 	* Neither the name of the copyright holder nor the names of its
+ * 	contributors may be used to endorse or promote products derived from
+ * 	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+#include <openssl/sha.h> /* partial hash APIs */
+#include <openssl/evp.h> /* subkey APIs */
+
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA1_H4, SHA1_H3,
+	SHA1_H2, SHA1_H1,
+	SHA1_H0, 0x0U,
+	0x0U, 0x0U,
+};
+
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA224_H7, SHA224_H6,
+	SHA224_H5, SHA224_H4,
+	SHA224_H3, SHA224_H2,
+	SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA256_H7, SHA256_H6,
+	SHA256_H5, SHA256_H4,
+	SHA256_H3, SHA256_H2,
+	SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA384_H7, SHA384_H6,
+	SHA384_H5, SHA384_H4,
+	SHA384_H3, SHA384_H2,
+	SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA512_H7, SHA512_H6,
+	SHA512_H5, SHA512_H4,
+	SHA512_H3, SHA512_H2,
+	SHA512_H1, SHA512_H0,
+};
+
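+/*
+ * Map a symmetric xform chain onto a CCP command type: single cipher,
+ * single auth, cipher-then-auth, auth-then-cipher, or a combined AEAD
+ * operation.
+ */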
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+	if (xform == NULL)
+		return res;
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next == NULL)
+			return CCP_CMD_AUTH;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return CCP_CMD_HASH_CIPHER;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next == NULL)
+			return CCP_CMD_CIPHER;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return CCP_CMD_CIPHER_HASH;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+		return CCP_CMD_COMBINED;
+	return res;
+}
+
+/* partial hash using OpenSSL */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA_CTX ctx;
+
+	if (!SHA1_Init(&ctx))
+		return -EFAULT;
+	SHA1_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA224_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA256_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA384_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA512_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
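+/*
+ * Pre-compute the HMAC inner and outer partial hashes. H(key ^ ipad) and
+ * H(key ^ opad) are each run through a single block transform on the host
+ * (via the OpenSSL *_Transform() helpers above) and stored word-reversed
+ * in sess->auth.pre_compute, so the engine only has to hash the message
+ * and the final outer block.
+ */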
+static int generate_partial_hash(struct ccp_session *sess)
+{
+	uint8_t ipad[sess->auth.block_size];
+	uint8_t	opad[sess->auth.block_size];
+	uint8_t *ipad_t, *opad_t;
+	uint32_t *hash_value_be32, hash_temp32[8];
+	uint64_t *hash_value_be64, hash_temp64[8];
+	int i, count;
+
+	opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+	hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+	hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
+
+	/* the key has been zero-padded to the algorithm block size by the caller */
+	for (i = 0; i < sess->auth.block_size; i++) {
+		ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+		opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+	}
+
+	switch (sess->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		count = SHA1_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
+	default:
+		CCP_LOG_ERR("Invalid session algo");
+		return -1;
+	}
+}
+
+/* session configuration */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+			     const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	size_t i, j, x;
+
+	cipher_xform = &xform->cipher;
+
+	/* set cipher direction */
+	if (cipher_xform->op ==  RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+	else
+		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+	/* set cipher key */
+	sess->cipher.key_length = cipher_xform->key.length;
+	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+		   cipher_xform->key.length);
+
+	/* set iv parameters */
+	sess->iv.offset = cipher_xform->iv.offset;
+	sess->iv.length = cipher_xform->iv.length;
+
+	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+		sess->cipher.um.aes_mode = CCP_DES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_3DES;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo");
+		return -1;
+	}
+
+	switch (sess->cipher.engine) {
+	case CCP_ENGINE_AES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (i = 0; i < sess->cipher.key_length ; i++)
+			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+				sess->cipher.key[i];
+		break;
+	case CCP_ENGINE_3DES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+			for (i = 0; i < 8; i++)
+				sess->cipher.key_ccp[(8 + x) - i - 1] =
+					sess->cipher.key[i + x];
+		break;
+	default:
+		/* should never reach here */
+		CCP_LOG_ERR("Invalid CCP Engine");
+		return -1;
+	}
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	return 0;
+}
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_auth_xform *auth_xform = NULL;
+
+	auth_xform = &xform->auth;
+
+	sess->auth.digest_length = auth_xform->digest_length;
+	if (auth_xform->op ==  RTE_CRYPTO_AUTH_OP_GENERATE)
+		sess->auth.op = CCP_AUTH_OP_GENERATE;
+	else
+		sess->auth.op = CCP_AUTH_OP_VERIFY;
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1:
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx = (void *)ccp_sha1_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx = (void *)ccp_sha224_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx = (void *)ccp_sha256_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx = (void *)ccp_sha384_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx = (void *)ccp_sha512_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported hash algo");
+		return -1;
+	}
+	return 0;
+}
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_aead_xform *aead_xform = NULL;
+	size_t i;
+
+	aead_xform = &xform->aead;
+
+	sess->cipher.key_length = aead_xform->key.length;
+	rte_memcpy(sess->cipher.key, aead_xform->key.data,
+		   aead_xform->key.length);
+
+	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+		sess->auth.op = CCP_AUTH_OP_GENERATE;
+	} else {
+		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+		sess->auth.op = CCP_AUTH_OP_VERIFY;
+	}
+	sess->auth.aad_length = aead_xform->aad_length;
+	sess->auth.digest_length = aead_xform->digest_length;
+
+	/* set iv parameters */
+	sess->iv.offset = aead_xform->iv.offset;
+	sess->iv.length = aead_xform->iv.length;
+
+	switch (aead_xform->algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid aead key length");
+			return -1;
+		}
+		for (i = 0; i < sess->cipher.key_length; i++)
+			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+				sess->cipher.key[i];
+		sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = 0;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		sess->cmd_id = CCP_CMD_COMBINED;
+		break;
+	default:
+		return -ENOTSUP;
+	}
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	return 0;
+}
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *aead_xform = NULL;
+	int ret = 0;
+
+	sess->cmd_id = ccp_get_cmd_id(xform);
+
+	switch (sess->cmd_id) {
+	case CCP_CMD_CIPHER:
+		cipher_xform = xform;
+		break;
+	case CCP_CMD_AUTH:
+		auth_xform = xform;
+		break;
+	case CCP_CMD_CIPHER_HASH:
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case CCP_CMD_HASH_CIPHER:
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	case CCP_CMD_COMBINED:
+		aead_xform = xform;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+		return -1;
+	}
+
+	/* Default IV length = 0 */
+	sess->iv.length = 0;
+	if (cipher_xform) {
+		ret = ccp_configure_session_cipher(sess, cipher_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+			return ret;
+		}
+	}
+	if (auth_xform) {
+		ret = ccp_configure_session_auth(sess, auth_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported auth parameters");
+			return ret;
+		}
+	}
+	if (aead_xform) {
+		ret = ccp_configure_session_aead(sess, aead_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported aead parameters");
+			return ret;
+		}
+	}
+	return ret;
+}
+
+/* calculate CCP descriptors requirement */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		count = 1;
+		/**< only op */
+		break;
+	case CCP_CIPHER_ALGO_AES_CTR:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported ALGO %d", session->cipher.algo);
+	}
+	return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
+		count = 3;
+		/**< op + lsb passthrough copy to/from */
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		count = 6;
+		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = 7;
+		/**
+		 * 1. Load PHash1 = H(k ^ ipad) to LSB
+		 * 2. Generate IHash = H(message with PHash1
+		 * as init values)
+		 * 3. Retrieve IHash (2 slots for 384/512)
+		 * 4. Load PHash2 = H(k ^ opad) to LSB
+		 * 5. Generate FHash = H(IHash with PHash2
+		 * as init values)
+		 * 6. Retrieve HMAC output from LSB to host memory
+		 */
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported ALGO %d", session->auth.algo);
+	}
+
+	return count;
+}
+
+static int
+ccp_combined_mode_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_GCM:
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported combined cipher ALGO %d",
+			    session->cipher.algo);
+	}
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		count = 5;
+		/**
+		 * 1. Passthru iv
+		 * 2. Hash AAD
+		 * 3. GCTR
+		 * 4. Reload passthru
+		 * 5. Hash Final tag
+		 */
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+			    session->auth.algo);
+	}
+	return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cmd_id) {
+	case CCP_CMD_CIPHER:
+		count = ccp_cipher_slot(session);
+		break;
+	case CCP_CMD_AUTH:
+		count = ccp_auth_slot(session);
+		break;
+	case CCP_CMD_CIPHER_HASH:
+	case CCP_CMD_HASH_CIPHER:
+		count = ccp_cipher_slot(session);
+		count += ccp_auth_slot(session);
+		break;
+	case CCP_CMD_COMBINED:
+		count = ccp_combined_mode_slot(session);
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+	}
+
+	return count;
+}
+
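+/*
+ * Queue a passthru descriptor on cmd_q. pst->dir == 1 copies pst->len
+ * bytes from system memory into the LSB scratch area; pst->dir == 0
+ * copies from the LSB back to system memory, with optional bitwise and
+ * byte-swap fixups applied by the engine.
+ */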
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_desc *desc;
+	union ccp_function function;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 0;
+	CCP_CMD_EOM(desc) = 0;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+	CCP_PT_BITWISE(&function) = pst->bit_mod;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = pst->len;
+
+	if (pst->dir) {
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = 0;
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+	} else {
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = 0;
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+	}
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
+
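+/*
+ * HMAC: seed the LSB with the pre-computed PHash1/PHash2 values and run
+ * two SHA passes (inner and outer), retrieving intermediate and final
+ * results through passthru copies; see ccp_auth_slot() for the
+ * descriptor breakdown.
+ */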
+static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, dest_addr_t;
+	struct ccp_passthru pst;
+	uint64_t auth_msg_bits;
+	void *append_ptr;
+	uint8_t *addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+	addr = session->auth.pre_compute;
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	dest_addr_t = dest_addr;
+
+	/** Load PHash1 to LSB*/
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/**sha engine command descriptor for IntermediateHash*/
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+	auth_msg_bits = (op->sym->auth.data.length +
+			 session->auth.block_size) * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Intermediate Hash value retrieve */
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = session->auth.ctx_len;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	}
+
+	/** Load PHash2 to LSB*/
+	addr += session->auth.ctx_len;
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/**sha engine command descriptor for FinalHash*/
+	dest_addr_t += session->auth.offset;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+			     session->auth.offset);
+	auth_msg_bits = (session->auth.block_size +
+			 session->auth.ctx_len -
+			 session->auth.offset) * 8;
+
+	CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Retrieve hmac output */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
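+/*
+ * Plain SHA: load the algorithm's initial context into the LSB, run one
+ * SHA descriptor over the data, then copy the digest from the LSB into
+ * a scratch area appended to the source mbuf.
+ */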
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr;
+	struct ccp_passthru pst;
+	void *append_ptr;
+	uint64_t auth_msg_bits;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+
+	/** Passthru sha context*/
+
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+						     session->auth.ctx);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/**prepare sha command descriptor*/
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+	auth_msg_bits = op->sym->auth.data.length * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Hash value retrieve */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
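+/*
+ * AES (ECB/CBC/CTR): for modes that need an IV, first passthru the IV
+ * (nonce + IV for CTR) into the LSB, then queue a single AES descriptor;
+ * the doorbell is rung once for the whole batch by the caller.
+ */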
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *lsb_buf;
+	struct ccp_passthru pst = {0};
+	struct ccp_desc *desc;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	uint8_t *iv;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+	function.raw = 0;
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
+		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
+				   iv, session->iv.length);
+			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+			CCP_AES_SIZE(&function) = 0x1F;
+		} else {
+			lsb_buf =
+			&(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+			rte_memcpy(lsb_buf +
+				   (CCP_SB_BYTES - session->iv.length),
+				   iv, session->iv.length);
+			pst.src_addr = b_info->lsb_buf_phys +
+				(b_info->lsb_buf_idx * CCP_SB_BYTES);
+			b_info->lsb_buf_idx++;
+		}
+
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+	key_addr = session->cipher.key_phys;
+
+	/* prepare desc for aes command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	unsigned char *lsb_buf;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *iv;
+	phys_addr_t src_addr, dest_addr, key_addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	switch (session->cipher.um.des_mode) {
+	case CCP_DES_MODE_CBC:
+		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+		b_info->lsb_buf_idx++;
+
+		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+			   iv, session->iv.length);
+
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+		break;
+	case CCP_DES_MODE_CFB:
+	case CCP_DES_MODE_ECB:
+		CCP_LOG_ERR("Unsupported DES cipher mode");
+		return -1;
+	}
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr =
+			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						   op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+
+	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for des command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.des_mode)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	/* Write the new tail address back to the queue register */
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
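+/*
+ * AES-GCM is issued as five descriptors (see ccp_combined_mode_slot()):
+ * IV passthru, GHASH over the AAD, GCTR over the payload, an IV reload,
+ * and a final GHASH over the (AAD length || payload length) block that
+ * was appended to the source mbuf.
+ */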
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *lsb_buf, *append_ptr, *iv;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint64_t *temp;
+	phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+	phys_addr_t digest_dest_addr;
+	int length, non_align_len, i;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						   session->auth.ctx_len);
+	digest_dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	append_ptr += AES_BLOCK_SIZE;
+	temp = (uint64_t *)append_ptr;
+	*temp++ = rte_bswap64(session->auth.aad_length << 3);
+	*temp = rte_bswap64(op->sym->cipher.data.length << 3);
+
+	non_align_len = op->sym->cipher.data.length % AES_BLOCK_SIZE;
+	length = CCP_ALIGN(op->sym->cipher.data.length, AES_BLOCK_SIZE);
+
+	aad_addr = rte_mem_virt2phy((void *)op->sym->aead.aad.data);
+
+	/* CMD1 IV Passthru */
+	for (i = 0; i < CTR_IV_SIZE; i++)
+		session->cipher.nonce[CTR_NONCE_SIZE + CTR_IV_SIZE - 1 - i] =
+			iv[i];
+	lsb_buf = session->cipher.nonce;
+
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD2 GHASH-AAD */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD3 : GCTR Plain text */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	if (non_align_len == 0)
+		CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+	else
+		CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD4 : PT to copy IV */
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = AES_BLOCK_SIZE;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD5 : GHASH-Final */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	/* Last block (AAD_len || PT_len)*/
+	CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
+	CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+		  struct ccp_queue *cmd_q,
+		  struct ccp_batch_info *b_info)
+{
+	int result;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	case CCP_CIPHER_ALGO_AES_CTR:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 1;
+		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		result = ccp_perform_3des(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported Cipher algo");
+		result = -1;
+	}
+	return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
+		result = ccp_perform_sha(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 6;
+		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 7;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported Auth algo");
+		result = -1;
+	}
+
+	return result;
+}
+
+static inline int
+ccp_crypto_combined(struct rte_crypto_op *op,
+		    struct ccp_queue *cmd_q __rte_unused,
+		    struct ccp_batch_info *b_info __rte_unused)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+			CCP_LOG_ERR("Incorrect chain order");
+			return -1;
+		}
+		result = ccp_perform_aes_gcm(op, cmd_q);
+		b_info->desccnt += 5;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported combined algo");
+		return -1;
+	}
+	return result;
+}
+
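+/*
+ * Build descriptors for a burst of ops on one hardware queue and ring
+ * the doorbell once for the whole batch. The batch's head/tail queue
+ * offsets are recorded in b_info so process_ops_to_dequeue() can tell
+ * when the engine has consumed all of its descriptors.
+ */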
+int
+process_ops_to_enqueue(const struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       struct ccp_queue *cmd_q,
+		       uint16_t nb_ops,
+		       int slots_req)
+{
+	int i, result = 0;
+	struct ccp_batch_info *b_info;
+	struct ccp_session *session;
+
+	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+		CCP_LOG_ERR("batch info allocation failed");
+		return 0;
+	}
+	/* populate batch info necessary for dequeue */
+	b_info->op_idx = 0;
+	b_info->lsb_buf_idx = 0;
+	b_info->desccnt = 0;
+	b_info->cmd_q = cmd_q;
+	b_info->lsb_buf_phys =
+		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+	for (i = 0; i < nb_ops; i++) {
+		session = (struct ccp_session *)get_session_private_data(
+						 op[i]->sym->session,
+						 cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_AUTH:
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_CIPHER_HASH:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			if (result)
+				break;
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_HASH_CIPHER:
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			if (result)
+				break;
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_COMBINED:
+			result = ccp_crypto_combined(op[i], cmd_q, b_info);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+			result = -1;
+		}
+		if (unlikely(result < 0)) {
+			rte_atomic64_add(&b_info->cmd_q->free_slots,
+					 (slots_req - b_info->desccnt));
+			break;
+		}
+		b_info->op[i] = op[i];
+	}
+
+	b_info->opcnt = i;
+	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+
+	rte_wmb();
+	/* Write the new tail address back to the queue register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      b_info->tail_offset);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+
+	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+	return i;
+}
+
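+/*
+ * Post-process an auth/aead op after completion: the digest was written
+ * into a scratch area appended to the mbuf; byte-swap it on the host
+ * where required (SHA-384/512), then either verify it against the
+ * expected digest or copy it out, and trim the scratch area again.
+ */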
+static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+	struct ccp_session *session;
+	uint8_t *digest_data, *addr;
+	struct rte_mbuf *m_last;
+	int offset, digest_offset;
+	uint8_t digest_le[64];
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	if (session->cmd_id == CCP_CMD_COMBINED) {
+		digest_data = op->sym->aead.digest.data;
+		digest_offset = op->sym->aead.data.offset +
+					op->sym->aead.data.length;
+	} else {
+		digest_data = op->sym->auth.digest.data;
+		digest_offset = op->sym->auth.data.offset +
+					op->sym->auth.data.length;
+	}
+	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+			   m_last->data_len - session->auth.ctx_len);
+
+	rte_mb();
+	offset = session->auth.offset;
+
+	if (session->auth.engine == CCP_ENGINE_SHA)
+		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+			/* All other algorithms require byte
+			 * swap done by host
+			 */
+			unsigned int i;
+
+			offset = session->auth.ctx_len -
+				session->auth.offset - 1;
+			for (i = 0; i < session->auth.digest_length; i++)
+				digest_le[i] = addr[offset - i];
+			offset = 0;
+			addr = digest_le;
+		}
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(addr + offset, digest_data,
+			   session->auth.digest_length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+	} else {
+		if (unlikely(digest_data == NULL))
+			digest_data = rte_pktmbuf_mtod_offset(
+					op->sym->m_dst, uint8_t *,
+					digest_offset);
+		rte_memcpy(digest_data, addr + offset,
+			   session->auth.digest_length);
+	}
+	/* Trim area used for digest from mbuf. */
+	rte_pktmbuf_trim(op->sym->m_src,
+			 session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct rte_crypto_op **op_d,
+		struct ccp_batch_info *b_info,
+		uint16_t nb_ops)
+{
+	int i, min_ops;
+	struct ccp_session *session;
+
+	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+	for (i = 0; i < min_ops; i++) {
+		op_d[i] = b_info->op[b_info->op_idx++];
+		session = (struct ccp_session *)get_session_private_data(
+						 op_d[i]->sym->session,
+						 cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			break;
+		case CCP_CMD_AUTH:
+		case CCP_CMD_CIPHER_HASH:
+		case CCP_CMD_HASH_CIPHER:
+		case CCP_CMD_COMBINED:
+			ccp_auth_dq_prepare(op_d[i]);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+		}
+	}
+
+	b_info->opcnt -= min_ops;
+	return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       uint16_t nb_ops)
+{
+	struct ccp_batch_info *b_info;
+	uint32_t cur_head_offset;
+
+	if (qp->b_info != NULL) {
+		b_info = qp->b_info;
+		if (unlikely(b_info->op_idx > 0))
+			goto success;
+	} else if (rte_ring_dequeue(qp->processed_pkts,
+				    (void **)&b_info))
+		return 0;
+	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+				       CMD_Q_HEAD_LO_BASE);
+
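+	/*
+	 * The descriptor ring is circular: the batch is still in flight
+	 * while the hardware head pointer lies inside the
+	 * [head_offset, tail_offset) window; the else branch handles the
+	 * wrapped case.
+	 */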
+	if (b_info->head_offset < b_info->tail_offset) {
+		if ((cur_head_offset >= b_info->head_offset) &&
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	} else {
+		if ((cur_head_offset >= b_info->head_offset) ||
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	}
+
+success:
+	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
+	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+	b_info->desccnt = 0;
+	if (b_info->opcnt > 0) {
+		qp->b_info = b_info;
+	} else {
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		qp->b_info = NULL;
+	}
+
+	return nb_ops;
+}
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 0000000..675b5ae
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,364 @@
+/*-
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * 	* Redistributions of source code must retain the above copyright
+ * 	notice, this list of conditions and the following disclaimer.
+ * 	* Redistributions in binary form must reproduce the above copyright
+ * 	notice, this list of conditions and the following disclaimer in the
+ * 	documentation and/or other materials provided with the distribution.
+ * 	* Neither the name of the copyright holder nor the names of its
+ * 	contributors may be used to endorse or promote products derived from
+ * 	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include <ccp_dev.h>
+
+#define AES_BLOCK_SIZE 16
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
+#define CCP_SHA_CTX_SIZE 200
+
+/** Macro helpers for CCP command creation */
+#define	CCP_AES_SIZE(p)		((p)->aes.size)
+#define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
+#define	CCP_AES_MODE(p)		((p)->aes.mode)
+#define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_DES_ENCRYPT(p)	((p)->des.encrypt)
+#define	CCP_DES_MODE(p)		((p)->des.mode)
+#define	CCP_DES_TYPE(p)		((p)->des.type)
+#define	CCP_SHA_TYPE(p)		((p)->sha.type)
+#define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
+#define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
+
+/** HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+/** SHA */
+#define SHA1_DIGEST_SIZE        20
+#define SHA1_BLOCK_SIZE         64
+
+#define SHA224_DIGEST_SIZE      28
+#define SHA224_BLOCK_SIZE       64
+
+#define SHA256_DIGEST_SIZE      32
+#define SHA256_BLOCK_SIZE       64
+
+#define SHA384_DIGEST_SIZE      48
+#define SHA384_BLOCK_SIZE       128
+
+#define SHA512_DIGEST_SIZE      64
+#define SHA512_BLOCK_SIZE       128
+
+/** SHA initial hash values (FIPS 180-4) used for LSB initialization */
+
+#define SHA1_H0		0x67452301UL
+#define SHA1_H1		0xefcdab89UL
+#define SHA1_H2		0x98badcfeUL
+#define SHA1_H3		0x10325476UL
+#define SHA1_H4		0xc3d2e1f0UL
+
+#define SHA224_H0	0xc1059ed8UL
+#define SHA224_H1	0x367cd507UL
+#define SHA224_H2	0x3070dd17UL
+#define SHA224_H3	0xf70e5939UL
+#define SHA224_H4	0xffc00b31UL
+#define SHA224_H5	0x68581511UL
+#define SHA224_H6	0x64f98fa7UL
+#define SHA224_H7	0xbefa4fa4UL
+
+#define SHA256_H0	0x6a09e667UL
+#define SHA256_H1	0xbb67ae85UL
+#define SHA256_H2	0x3c6ef372UL
+#define SHA256_H3	0xa54ff53aUL
+#define SHA256_H4	0x510e527fUL
+#define SHA256_H5	0x9b05688cUL
+#define SHA256_H6	0x1f83d9abUL
+#define SHA256_H7	0x5be0cd19UL
+
+#define SHA384_H0	0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1	0x629a292a367cd507ULL
+#define SHA384_H2	0x9159015a3070dd17ULL
+#define SHA384_H3	0x152fecd8f70e5939ULL
+#define SHA384_H4	0x67332667ffc00b31ULL
+#define SHA384_H5	0x8eb44a8768581511ULL
+#define SHA384_H6	0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7	0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0	0x6a09e667f3bcc908ULL
+#define SHA512_H1	0xbb67ae8584caa73bULL
+#define SHA512_H2	0x3c6ef372fe94f82bULL
+#define SHA512_H3	0xa54ff53a5f1d36f1ULL
+#define SHA512_H4	0x510e527fade682d1ULL
+#define SHA512_H5	0x9b05688c2b3e6c1fULL
+#define SHA512_H6	0x1f83d9abfb41bd6bULL
+#define SHA512_H7	0x5be0cd19137e2179ULL
+
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+	CCP_AES_MODE_ECB = 0,
+	CCP_AES_MODE_CBC,
+	CCP_AES_MODE_OFB,
+	CCP_AES_MODE_CFB,
+	CCP_AES_MODE_CTR,
+	CCP_AES_MODE_CMAC,
+	CCP_AES_MODE_GHASH,
+	CCP_AES_MODE_GCTR,
+	CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+	CCP_AES_MODE_GHASH_AAD = 0,
+	CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+	CCP_AES_TYPE_128 = 0,
+	CCP_AES_TYPE_192,
+	CCP_AES_TYPE_256,
+	CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+	CCP_DES_MODE_ECB = 0, /* Not supported */
+	CCP_DES_MODE_CBC,
+	CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+	CCP_DES_TYPE_128 = 0,	/* 112 + 16 parity */
+	CCP_DES_TYPE_192,	/* 168 + 24 parity */
+	CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ * @CCP_SHA_TYPE_384: SHA-384 operation
+ * @CCP_SHA_TYPE_512: SHA-512 operation
+ */
+enum ccp_sha_type {
+	CCP_SHA_TYPE_1 = 1,
+	CCP_SHA_TYPE_224,
+	CCP_SHA_TYPE_256,
+	CCP_SHA_TYPE_384,
+	CCP_SHA_TYPE_512,
+	CCP_SHA_TYPE_RSVD1,
+	CCP_SHA_TYPE_RSVD2,
+	CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+	CCP_CIPHER_ALGO_AES_CBC = 0,
+	CCP_CIPHER_ALGO_AES_ECB,
+	CCP_CIPHER_ALGO_AES_CTR,
+	CCP_CIPHER_ALGO_AES_GCM,
+	CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+	CCP_CIPHER_DIR_DECRYPT = 0,
+	CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+	CCP_AUTH_ALGO_SHA1 = 0,
+	CCP_AUTH_ALGO_SHA1_HMAC,
+	CCP_AUTH_ALGO_SHA224,
+	CCP_AUTH_ALGO_SHA224_HMAC,
+	CCP_AUTH_ALGO_SHA256,
+	CCP_AUTH_ALGO_SHA256_HMAC,
+	CCP_AUTH_ALGO_SHA384,
+	CCP_AUTH_ALGO_SHA384_HMAC,
+	CCP_AUTH_ALGO_SHA512,
+	CCP_AUTH_ALGO_SHA512_HMAC,
+	CCP_AUTH_ALGO_AES_GCM,
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+	CCP_AUTH_OP_GENERATE = 0,
+	CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+	enum ccp_cmd_order cmd_id;
+	/**< chain order mode */
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	/**< IV parameters */
+	struct {
+		enum ccp_cipher_algo  algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+			enum ccp_des_mode des_mode;
+		} um;
+		union {
+			enum ccp_aes_type aes_type;
+			enum ccp_des_type des_type;
+		} ut;
+		enum ccp_cipher_dir dir;
+		uint64_t key_length;
+		/**< max cipher key size 256 bits */
+		uint8_t key[32];
+		/**< key in CCP format */
+		uint8_t key_ccp[32];
+		phys_addr_t key_phys;
+		/**< AES-CTR: nonce(4) || iv(8) || counter */
+		uint8_t nonce[32];
+		phys_addr_t nonce_phys;
+	} cipher;
+	/**< Cipher Parameters */
+
+	struct {
+		enum ccp_hash_algo algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+		} um;
+		union {
+			enum ccp_sha_type sha_type;
+			enum ccp_aes_type aes_type;
+		} ut;
+		enum ccp_hash_op op;
+		uint64_t key_length;
+		/**< max hash key size 144 bytes (struct capabilities) */
+		uint8_t key[144];
+		/**< max big-endian AES key size is 32 bytes */
+		uint8_t key_ccp[32];
+		phys_addr_t key_phys;
+		uint64_t digest_length;
+		void *ctx;
+		int ctx_len;
+		int offset;
+		int block_size;
+		/**< buffer to store software-generated precompute values */
+		/**< For HMAC H(ipad ^ key) and H(opad ^ key) */
+		uint8_t pre_compute[2 * CCP_SHA_CTX_SIZE];
+		int aad_length;
+	} auth;
+	/**< Authentication Parameters */
+	enum rte_crypto_aead_algorithm aead_algo;
+	/**< AEAD Algorithm */
+
+	uint32_t reserved;
+} __rte_cache_aligned;
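+
+/*
+ * Example (illustrative): an AES-128-CBC + SHA1-HMAC cipher-then-hash
+ * chain sets cmd_id = CCP_CMD_CIPHER_HASH, cipher.algo =
+ * CCP_CIPHER_ALGO_AES_CBC (um.aes_mode = CCP_AES_MODE_CBC, ut.aes_type =
+ * CCP_AES_TYPE_128) and auth.algo = CCP_AUTH_ALGO_SHA1_HMAC, with
+ * H(ipad ^ key) and H(opad ^ key) kept in auth.pre_compute.
+ */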
+
+extern uint8_t cryptodev_driver_id;
+
+struct ccp_qp;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @return 0 on success otherwise -1
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+			       const struct rte_crypto_sym_xform *xform);
+
+/**
+ * Compute the number of descriptor slots required by a session's op
+ *
+ * @param session CCP private session
+ * @return number of slots required
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * process crypto ops to be enqueued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops No. of ops to be submitted
+ * @param slots_req number of descriptor slots required for the ops
+ * @return number of ops successfully enqueued
+ */
+int process_ops_to_enqueue(const struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   struct ccp_queue *cmd_q,
+			   uint16_t nb_ops,
+			   int slots_req);
+
+/**
+ * process crypto ops to be dequeued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops requested no. of ops
+ * @return number of ops successfully dequeued
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   uint16_t nb_ops);
+
+#endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 0000000..bc26020
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,846 @@
+/*-
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * 	* Redistributions of source code must retain the above copyright
+ * 	notice, this list of conditions and the following disclaimer.
+ * 	* Redistributions in binary form must reproduce the above copyright
+ * 	notice, this list of conditions and the following disclaimer in the
+ * 	documentation and/or other materials provided with the distribution.
+ * 	* Neither the name of the copyright holder nor the names of its
+ * 	contributors may be used to endorse or promote products derived from
+ * 	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include <ccp_dev.h>
+#include <ccp_pci.h>
+#include <ccp_pmd_private.h>
+
+#include <openssl/sha.h> /*partial hash apis*/
+#include <openssl/evp.h> /*sub key apis*/
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+	struct ccp_private *priv = dev->data->dev_private;
+
+	priv->last_dev = TAILQ_FIRST(&ccp_list);
+	return 0;
+}
+
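+/*
+ * Pick a command queue with at least slot_req free descriptor slots:
+ * resume round-robin at the device after the last one used, then scan
+ * that device's queues.
+ */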
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+	int i, ret = 0;
+	struct ccp_device *dev;
+	struct ccp_private *priv = cdev->data->dev_private;
+
+	dev = TAILQ_NEXT(priv->last_dev, next);
+	if (unlikely(dev == NULL))
+		dev = TAILQ_FIRST(&ccp_list);
+	priv->last_dev = dev;
+	if (dev->qidx >= dev->cmd_q_count)
+		dev->qidx = 0;
+	ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+	if (ret >= slot_req)
+		return &dev->cmd_q[dev->qidx];
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->qidx++;
+		if (dev->qidx >= dev->cmd_q_count)
+			dev->qidx = 0;
+		ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+		if (ret >= slot_req)
+			return &dev->cmd_q[dev->qidx];
+	}
+	return NULL;
+}
+
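+/* Poll each CCP's TRNG output register, up to TRNG_RETRIES reads per
+ * device, until a non-zero random word is returned.
+ */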
+int
+ccp_read_hwrng(uint32_t *value)
+{
+	struct ccp_device *dev;
+
+	TAILQ_FOREACH(dev, &ccp_list, next) {
+		void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+		while (dev->hwrng_retries++ < TRNG_RETRIES) {
+			*value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+			if (*value) {
+				dev->hwrng_retries = 0;
+				return 0;
+			}
+		}
+		dev->hwrng_retries = 0;
+	}
+	return -1;
+}
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+			   uint32_t queue_size,
+			   int socket_id)
+{
+	const struct rte_memzone *mz;
+	unsigned int memzone_flags = 0;
+	const struct rte_memseg *ms;
+
+	mz = rte_memzone_lookup(queue_name);
+	if (mz != NULL)
+		return mz;
+
+	ms = rte_eal_get_physmem_layout();
+	switch (ms[0].hugepage_sz) {
+	case(RTE_PGSIZE_2M):
+		memzone_flags = RTE_MEMZONE_2MB;
+		break;
+	case(RTE_PGSIZE_1G):
+		memzone_flags = RTE_MEMZONE_1GB;
+		break;
+	case(RTE_PGSIZE_16M):
+		memzone_flags = RTE_MEMZONE_16MB;
+		break;
+	case(RTE_PGSIZE_16G):
+		memzone_flags = RTE_MEMZONE_16GB;
+		break;
+	default:
+		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
+	}
+
+	return rte_memzone_reserve_aligned(queue_name,
+					   queue_size,
+					   socket_id,
+					   memzone_flags,
+					   queue_size);
+}
+
+/*---bitmap support apis---*/
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
+}
+
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+	unsigned long first_zero;
+
+	first_zero = __builtin_ffsl(~word);
+	return first_zero ? (first_zero - 1) :
+		BITS_PER_WORD;
+}
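+
+/* ccp_ffz returns the index of the first zero bit, e.g. ccp_ffz(0x7) == 3 */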
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+	uint32_t i;
+	uint32_t nwords = 0;
+
+	nwords = (limit - 1) / BITS_PER_WORD + 1;
+	for (i = 0; i < nwords; i++) {
+		if (addr[i] == 0UL)
+			return i * BITS_PER_WORD;
+		if (addr[i] < ~(0UL))
+			break;
+	}
+	return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_WORD;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_clear >= 0) {
+		*p &= ~mask_to_clear;
+		len -= bits_to_clear;
+		bits_to_clear = BITS_PER_WORD;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p &= ~mask_to_clear;
+	}
+}
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+		   unsigned long nbits,
+		   unsigned long start,
+		   unsigned long invert)
+{
+	unsigned long tmp;
+
+	if (!nbits || start >= nbits)
+		return nbits;
+
+	tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+	/* Handle 1st word. */
+	tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+	start = ccp_round_down(start, BITS_PER_WORD);
+
+	while (!tmp) {
+		start += BITS_PER_WORD;
+		if (start >= nbits)
+			return nbits;
+
+		tmp = addr[start / BITS_PER_WORD] ^ invert;
+	}
+
+	return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+		  unsigned long size,
+		  unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+		       unsigned long size,
+		       unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ *
+ * Returns the index of the found area, or a value greater than @size
+ * when no such area is available.
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+			       unsigned long size,
+			       unsigned long start,
+			       unsigned int nr)
+{
+	unsigned long index, end, i;
+
+again:
+	index = ccp_find_next_zero_bit(map, size, start);
+
+	end = index + nr;
+	if (end > size)
+		return end;
+	i = ccp_find_next_bit(map, end, index);
+	if (i < end) {
+		start = i + 1;
+		goto again;
+	}
+	return index;
+}
+
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+	struct ccp_device *ccp;
+	int start;
+
+	/* First look at the map for the queue */
+	if (cmd_q->lsb >= 0) {
+		start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+								 LSB_SIZE, 0,
+								 count);
+		if (start < LSB_SIZE) {
+			ccp_bitmap_set(cmd_q->lsbmap, start, count);
+			return start + cmd_q->lsb * LSB_SIZE;
+		}
+	}
+
+	/* try to get an entry from the shared blocks */
+	ccp = cmd_q->dev;
+
+	rte_spinlock_lock(&ccp->lsb_lock);
+
+	start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+						    MAX_LSB_CNT * LSB_SIZE,
+						    0, count);
+	if (start <= MAX_LSB_CNT * LSB_SIZE) {
+		ccp_bitmap_set(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+		return start * LSB_ITEM_SIZE;
+	}
+	CCP_LOG_ERR("NO LSBs available");
+
+	rte_spinlock_unlock(&ccp->lsb_lock);
+
+	return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+	     unsigned int start,
+	     unsigned int count)
+{
+	int lsbno = start / LSB_SIZE;
+
+	if (!start)
+		return;
+
+	if (cmd_q->lsb == lsbno) {
+		/* An entry from the private LSB */
+		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+	} else {
+		/* From the shared LSBs */
+		struct ccp_device *ccp = cmd_q->dev;
+
+		rte_spinlock_lock(&ccp->lsb_lock);
+		ccp_bitmap_clear(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+	}
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+	int q_mask = 1 << cmd_q->id;
+	int weight = 0;
+	int j;
+
+	/* Build a bit mask to know which LSBs this queue has access to.
+	 * Don't bother with segment 0 as it has special privileges.
+	 */
+	cmd_q->lsbmask = 0;
+	status >>= LSB_REGION_WIDTH;
+	for (j = 1; j < MAX_LSB_CNT; j++) {
+		if (status & q_mask)
+			ccp_set_bit(&cmd_q->lsbmask, j);
+
+		status >>= LSB_REGION_WIDTH;
+	}
+
+	for (j = 0; j < MAX_LSB_CNT; j++)
+		if (ccp_get_bit(&cmd_q->lsbmask, j))
+			weight++;
+
+	printf("Queue %d can access %d LSB regions of mask %lu\n",
+	       (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+	return weight ? 0 : -EINVAL;
+}
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+			     int lsb_cnt, int n_lsbs,
+			     unsigned long *lsb_pub)
+{
+	unsigned long qlsb = 0;
+	int bitno = 0;
+	int qlsb_wgt = 0;
+	int i, j;
+
+	/* For each queue:
+	 * If the count of potential LSBs available to a queue matches the
+	 * ordinal given to us in lsb_cnt:
+	 * Copy the mask of possible LSBs for this queue into "qlsb";
+	 * For each bit in qlsb, see if the corresponding bit in the
+	 * aggregation mask is set; if so, we have a match.
+	 *     If we have a match, clear the bit in the aggregation to
+	 *     mark it as no longer available.
+	 *     If there is no match, clear the bit in qlsb and keep looking.
+	 */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+		qlsb_wgt = 0;
+		for (j = 0; j < MAX_LSB_CNT; j++)
+			if (ccp_get_bit(&cmd_q->lsbmask, j))
+				qlsb_wgt++;
+
+		if (qlsb_wgt == lsb_cnt) {
+			qlsb = cmd_q->lsbmask;
+
+			bitno = ffs(qlsb) - 1;
+			while (bitno < MAX_LSB_CNT) {
+				if (ccp_get_bit(lsb_pub, bitno)) {
+					/* We found an available LSB
+					 * that this queue can access
+					 */
+					cmd_q->lsb = bitno;
+					ccp_clear_bit(lsb_pub, bitno);
+					break;
+				}
+				ccp_clear_bit(&qlsb, bitno);
+				bitno = ffs(qlsb) - 1;
+			}
+			if (bitno >= MAX_LSB_CNT)
+				return -EINVAL;
+			n_lsbs--;
+		}
+	}
+	return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+	unsigned long lsb_pub = 0, qlsb = 0;
+	int n_lsbs = 0;
+	int bitno;
+	int i, lsb_cnt;
+	int rc = 0;
+
+	rte_spinlock_init(&ccp->lsb_lock);
+
+	/* Create an aggregate bitmap to get a total count of available LSBs */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+	for (i = 0; i < MAX_LSB_CNT; i++)
+		if (ccp_get_bit(&lsb_pub, i))
+			n_lsbs++;
+
+	if (n_lsbs >= ccp->cmd_q_count) {
+		/* We have enough LSBS to give every queue a private LSB.
+		 * Brute force search to start with the queues that are more
+		 * constrained in LSB choice. When an LSB is privately
+		 * assigned, it is removed from the public mask.
+		 * This is an ugly N squared algorithm with some optimization.
+		 */
+		for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+		     lsb_cnt++) {
+			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+							  &lsb_pub);
+			if (rc < 0)
+				return -EINVAL;
+			n_lsbs = rc;
+		}
+	}
+
+	rc = 0;
+	/* What's left of the LSBs, according to the public mask, now become
+	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
+	 * that can't be used as a shared resource, so mark the LSB slots for
+	 * them as "in use".
+	 */
+	qlsb = lsb_pub;
+	bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	while (bitno < MAX_LSB_CNT) {
+		ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+		ccp_set_bit(&qlsb, bitno);
+		bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	}
+
+	return rc;
+}
+
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+	int i;
+	uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+	uint64_t status;
+	struct ccp_queue *cmd_q;
+	const struct rte_memzone *q_mz;
+	void *vaddr;
+
+	if (dev == NULL)
+		return -1;
+
+	dev->id = ccp_dev_id++;
+	dev->qidx = 0;
+	vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+	if (type == CCP_VERSION_5B) {
+		CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+		CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+		for (i = 0; i < 12; i++) {
+			CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+				      CCP_READ_REG(vaddr, TRNG_OUT_REG));
+		}
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+		CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+	}
+	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+	/* Copy the private LSB mask to the public registers */
+	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+	status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+	status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo);
+
+	dev->cmd_q_count = 0;
+	/* Find available queues */
+	qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+	for (i = 0; i < MAX_HW_QUEUES; i++) {
+		if (!(qmr & (1 << i)))
+			continue;
+		cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+		cmd_q->dev = dev;
+		cmd_q->id = i;
+		cmd_q->qidx = 0;
+		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+		cmd_q->reg_base = (uint8_t *)vaddr +
+			CMD_Q_STATUS_INCR * (i + 1);
+
+		/* CCP queue memory */
+		snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+			 "%s_%d_%s_%d_%s",
+			 "ccp_dev",
+			 (int)dev->id, "queue",
+			 (int)cmd_q->id, "mem");
+		q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+						  cmd_q->qsize, SOCKET_ID_ANY);
+		if (q_mz == NULL) {
+			CCP_LOG_ERR("queue memzone reserve failed");
+			return -1;
+		}
+		cmd_q->qbase_addr = (void *)q_mz->addr;
+		cmd_q->qbase_desc = (void *)q_mz->addr;
+		cmd_q->qbase_phys_addr = q_mz->phys_addr;
+
+		cmd_q->qcontrol = 0;
+		/* init control reg to zero */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* Disable the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+		/* Clear the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+			      ALL_INTERRUPTS);
+
+		/* Configure size of each virtual queue accessible to host */
+		cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+		dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+
+		dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+		cmd_q->qcontrol |= (dma_addr_hi << 16);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* create LSB Mask map */
+		if (ccp_find_lsb_regions(cmd_q, status))
+			CCP_LOG_ERR("queue doesn't have lsb regions");
+		cmd_q->lsb = -1;
+
+		rte_atomic64_init(&cmd_q->free_slots);
+		rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+		/* one slot kept unused as a barrier between head & tail */
+	}
+
+	if (ccp_assign_lsbs(dev))
+		CCP_LOG_ERR("Unable to assign lsb region");
+
+	/* pre-allocate LSB slots */
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->cmd_q[i].sb_key =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_iv =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_sha =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+		dev->cmd_q[i].sb_hmac =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+	}
+
+	TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+	return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+	if (dev == NULL)
+		return;
+
+	TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+	      const struct rte_pci_id *ccp_id,
+	      int *type)
+{
+	char filename[PATH_MAX];
+	const struct rte_pci_id *id;
+	uint16_t vendor, device_id;
+	int i;
+	unsigned long tmp;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	vendor = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	device_id = (uint16_t)tmp;
+
+	for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+		if (vendor == id->vendor_id &&
+		    device_id == id->device_id) {
+			*type = i;
+			return 1; /* Matched device */
+		}
+	}
+	return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+		 uint8_t bus, uint8_t devid,
+		 uint8_t function, int ccp_type)
+{
+	struct ccp_device *ccp_dev = NULL;
+	struct rte_pci_device *pci;
+	char filename[PATH_MAX];
+	unsigned long tmp;
+	int uio_fd = -1, i, uio_num;
+	char uio_devname[PATH_MAX];
+	void *map_addr;
+
+	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+			      RTE_CACHE_LINE_SIZE);
+	if (ccp_dev == NULL)
+		goto fail;
+	pci = &(ccp_dev->pci);
+
+	pci->addr.domain = domain;
+	pci->addr.bus = bus;
+	pci->addr.devid = devid;
+	pci->addr.function = function;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.vendor_id = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.device_id = (uint16_t)tmp;
+
+	/* get subsystem_vendor id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+	/* get subsystem_device id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_device",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_device_id = (uint16_t)tmp;
+
+	/* get class_id */
+	snprintf(filename, sizeof(filename), "%s/class",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	/* only the least significant 24 bits are valid:
+	 * class, subclass, program interface
+	 */
+	pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+	/* parse resources */
+	snprintf(filename, sizeof(filename), "%s/resource", dirname);
+	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+		goto fail;
+
+	uio_num = ccp_find_uio_devname(dirname);
+	if (uio_num < 0) {
+		/*
+		 * It may take time for the uio device to appear;
+		 * wait here and try again.
+		 */
+		usleep(100000);
+		uio_num = ccp_find_uio_devname(dirname);
+		if (uio_num < 0)
+			goto fail;
+	}
+	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+	if (uio_fd < 0)
+		goto fail;
+	if (flock(uio_fd, LOCK_EX | LOCK_NB))
+		goto fail;
+
+	/* Map the PCI memory resource of device */
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+		char devname[PATH_MAX];
+		int res_fd;
+
+		if (pci->mem_resource[i].phys_addr == 0)
+			continue;
+		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+		res_fd = open(devname, O_RDWR);
+		if (res_fd < 0)
+			goto fail;
+		map_addr = mmap(NULL, pci->mem_resource[i].len,
+				PROT_READ | PROT_WRITE,
+				MAP_SHARED, res_fd, 0);
+		close(res_fd); /* the mapping persists after close */
+		if (map_addr == MAP_FAILED)
+			goto fail;
+
+		pci->mem_resource[i].addr = map_addr;
+	}
+
+	/* device is valid, add in list */
+	if (ccp_add_device(ccp_dev, ccp_type))
+		goto fail; /* not yet on ccp_list, so no removal needed */
+
+	return 0;
+fail:
+	CCP_LOG_ERR("CCP Device probe failed");
+	if (uio_fd >= 0)
+		close(uio_fd);
+	if (ccp_dev)
+		rte_free(ccp_dev);
+	return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+	int dev_cnt = 0;
+	int ccp_type = 0;
+	struct dirent *d;
+	DIR *dir;
+	int ret = 0;
+	int module_idx = 0;
+	uint16_t domain;
+	uint8_t bus, devid, function;
+	char dirname[PATH_MAX];
+
+	module_idx = ccp_check_pci_uio_module();
+	if (module_idx < 0)
+		return -1;
+
+	TAILQ_INIT(&ccp_list);
+	dir = opendir(SYSFS_PCI_DEVICES);
+	if (dir == NULL)
+		return -1;
+	while ((d = readdir(dir)) != NULL) {
+		if (d->d_name[0] == '.')
+			continue;
+		if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+					&domain, &bus, &devid, &function) != 0)
+			continue;
+		snprintf(dirname, sizeof(dirname), "%s/%s",
+			     SYSFS_PCI_DEVICES, d->d_name);
+		if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+			printf("CCP : Detected CCP device with ID = 0x%x\n",
+			       ccp_id[ccp_type].device_id);
+			ret = ccp_probe_device(dirname, domain, bus, devid,
+					       function, ccp_type);
+			if (ret == 0)
+				dev_cnt++;
+		}
+	}
+	closedir(dir);
+	return dev_cnt;
+}
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 0000000..28eb68e
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,530 @@
+/*-
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * 	* Redistributions of source code must retain the above copyright
+ * 	notice, this list of conditions and the following disclaimer.
+ * 	* Redistributions in binary form must reproduce the above copyright
+ * 	notice, this list of conditions and the following disclaimer in the
+ * 	documentation and/or other materials provided with the distribution.
+ * 	* Neither the name of the copyright holder nor the names of its
+ * 	contributors may be used to endorse or promote products derived from
+ * 	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/** CCP specific */
+#define MAX_HW_QUEUES                   5
+#define TRNG_RETRIES                    10
+#define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y)
+
+/****** Register Mappings ******/
+#define Q_MASK_REG                      0x000
+#define TRNG_OUT_REG                    0x00c
+
+/* ----------- CCP Version 5 Specifics ------------ */
+#define CMD_QUEUE_MASK_OFFSET		0x00
+#define	CMD_QUEUE_PRIO_OFFSET		0x04
+#define CMD_REQID_CONFIG_OFFSET		0x08
+#define	CMD_CMD_TIMEOUT_OFFSET		0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET	0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET	0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET	0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET	0x24
+
+#define CMD_Q_CONTROL_BASE		0x0000
+#define CMD_Q_TAIL_LO_BASE		0x0004
+#define CMD_Q_HEAD_LO_BASE		0x0008
+#define CMD_Q_INT_ENABLE_BASE		0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE	0x0010
+
+#define CMD_Q_STATUS_BASE		0x0100
+#define CMD_Q_INT_STATUS_BASE		0x0104
+
+#define	CMD_CONFIG_0_OFFSET		0x6000
+#define	CMD_TRNG_CTL_OFFSET		0x6008
+#define	CMD_AES_MASK_OFFSET		0x6010
+#define	CMD_CLK_GATE_CTL_OFFSET		0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR              0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN                      0x1
+
+#define CMD_Q_SIZE                     0x1F
+#define CMD_Q_SHIFT                    3
+#define COMMANDS_PER_QUEUE              2048
+
+#define QUEUE_SIZE_VAL                  ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+					 CMD_Q_SIZE)
+#define Q_DESC_SIZE                     sizeof(struct ccp_desc)
+#define Q_SIZE(n)                       (COMMANDS_PER_QUEUE*(n))
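+/*
+ * Example: with COMMANDS_PER_QUEUE = 2048 and 32-byte descriptors,
+ * Q_SIZE(Q_DESC_SIZE) is 64 KiB and QUEUE_SIZE_VAL is
+ * (ffs(2048) - 2) & 0x1F = 10, i.e. log2(COMMANDS_PER_QUEUE) - 1.
+ */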
+
+#define INT_COMPLETION                  0x1
+#define INT_ERROR                       0x2
+#define INT_QUEUE_STOPPED               0x4
+#define ALL_INTERRUPTS                  (INT_COMPLETION| \
+					 INT_ERROR| \
+					 INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH                5
+#define MAX_LSB_CNT                     8
+
+#define LSB_SIZE                        16
+#define LSB_ITEM_SIZE                   32
+#define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR)      (LSB_ADDR / LSB_ITEM_SIZE)
+
+/* ------------------------ General CCP Defines ------------------------ */
+
+#define CCP_SB_BYTES                    32
+/* Word 0 */
+#define CCP_CMD_DW0(p)		((p)->dw0)
+#define CCP_CMD_SOC(p)		(CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p)		(CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p)	        (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p)		(CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p)	(CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p)	(CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p)	        (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p)		((p)->length)
+#define CCP_CMD_LEN(p)		(CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p)		((p)->src_lo)
+#define CCP_CMD_SRC_LO(p)	(CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p)		((p)->dw3)
+#define CCP_CMD_SRC_MEM(p)	((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p)	((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p)	((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p)	((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p)		((p)->dw4)
+#define CCP_CMD_DST_LO(p)	(CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p)		((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p)	(CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p)	((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p)	((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p)	((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p)	((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p)		((p)->key_lo)
+#define CCP_CMD_KEY_LO(p)	(CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p)		((p)->dw7)
+#define CCP_CMD_KEY_HI(p)	((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p)	((p)->dw7.key_mem)
+/* bitmap */
+enum {
+	BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b)  ((b) % BITS_PER_WORD)
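+/* e.g. with 64-bit longs, bit 70 lives in word WORD_OFFSET(70) = 1 at
+ * bit BIT_OFFSET(70) = 6
+ */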
+
+#define CCP_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+	CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+	(~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+	(~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
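+/* e.g. ccp_round_down(70, 64) == 64; y must be a power of two */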
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+				     uint32_t value)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+	ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+	ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+	CCP_VERSION_5A = 0,
+	CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing statistics.
+ */
+struct stats {
+	uint64_t enq_cnt;
+	uint64_t deq_cnt;
+	uint64_t err_cnt;
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+	struct ccp_device *dev;
+	char memz_name[RTE_MEMZONE_NAMESIZE];
+	struct stats stat;
+
+	rte_atomic64_t free_slots;
+	/**< available free slots, updated by enq/deq calls */
+
+	/* Queue identifier */
+	uint64_t id;	/**< queue id */
+	uint64_t qidx;	/**< queue index */
+	uint64_t qsize;	/**< queue size */
+
+	/* Queue  address */
+	struct ccp_desc *qbase_desc;
+	void *qbase_addr;
+	phys_addr_t qbase_phys_addr;
+	/**< queue-page registers addr */
+	void *reg_base;
+
+	uint32_t qcontrol;
+	/**< queue ctrl reg*/
+
+	int lsb;
+	/**< lsb region assigned to queue */
+	unsigned long lsbmask;
+	/**< lsb regions this queue can access */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+	/**< all lsb resources which this queue is using */
+	uint32_t sb_key;
+	/**< lsb slot assigned for key */
+	uint32_t sb_iv;
+	/**< lsb slot assigned for iv */
+	uint32_t sb_sha;
+	/**< lsb slot assigned for sha ctx */
+	uint32_t sb_hmac;
+	/**< lsb slot assigned for hmac ctx */
+} __rte_cache_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+	TAILQ_ENTRY(ccp_device) next;
+	int id;
+	/**< CCP dev id on platform */
+	struct ccp_queue cmd_q[MAX_HW_QUEUES];
+	/**< CCP queues */
+	int cmd_q_count;
+	/**< no. of ccp queues */
+	struct rte_pci_device pci;
+	struct stats stat;
+	unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+	/**< shared lsb mask of ccp */
+	rte_spinlock_t lsb_lock;
+	/**< protection for shared lsb region allocation */
+	int qidx;
+	/**< current queue index */
+	int hwrng_retries;
+	/**< retry counter for CCP TRNG */
+} __rte_cache_aligned;
+
+/** CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+	CCP_ENGINE_AES = 0,
+	CCP_ENGINE_XTS_AES_128,
+	CCP_ENGINE_3DES,
+	CCP_ENGINE_SHA,
+	CCP_ENGINE_RSA,
+	CCP_ENGINE_PASSTHRU,
+	CCP_ENGINE_ZLIB_DECOMPRESS,
+	CCP_ENGINE_ECC,
+	CCP_ENGINE__LAST,
+};
+
+/***** Passthru engine *****/
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+	CCP_PASSTHRU_BITWISE_NOOP = 0,
+	CCP_PASSTHRU_BITWISE_AND,
+	CCP_PASSTHRU_BITWISE_OR,
+	CCP_PASSTHRU_BITWISE_XOR,
+	CCP_PASSTHRU_BITWISE_MASK,
+	CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+	CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+	CCP_PASSTHRU_BYTESWAP_32BIT,
+	CCP_PASSTHRU_BYTESWAP_256BIT,
+	CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+	phys_addr_t src_addr;
+	phys_addr_t dest_addr;
+	enum ccp_passthru_bitwise bit_mod;
+	enum ccp_passthru_byteswap byte_swap;
+	int len;
+	int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} aes;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} des;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t rsvd:5;
+		uint16_t type:2;
+	} aes_xts;
+	struct {
+		uint16_t rsvd1:10;
+		uint16_t type:4;
+		uint16_t rsvd2:1;
+	} sha;
+	struct {
+		uint16_t mode:3;
+		uint16_t size:12;
+	} rsa;
+	struct {
+		uint16_t byteswap:2;
+		uint16_t bitwise:3;
+		uint16_t reflect:2;
+		uint16_t rsvd:8;
+	} pt;
+	struct {
+		uint16_t rsvd:13;
+	} zlib;
+	struct {
+		uint16_t size:10;
+		uint16_t type:2;
+		uint16_t mode:3;
+	} ecc;
+	uint16_t raw;
+};
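+
+/*
+ * Illustrative use (sketch): the CCP_AES_* helpers in ccp_crypto.h write
+ * these bitfields, e.g. for an AES-128-CBC encrypt:
+ *
+ *	union ccp_function function = { .raw = 0 };
+ *	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+ *	CCP_AES_MODE(&function) = CCP_AES_MODE_CBC;
+ *	CCP_AES_TYPE(&function) = CCP_AES_TYPE_128;
+ */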
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+	uint32_t soc:1;
+	uint32_t ioc:1;
+	uint32_t rsvd1:1;
+	uint32_t init:1;
+	uint32_t eom:1;
+	uint32_t function:15;
+	uint32_t engine:4;
+	uint32_t prot:1;
+	uint32_t rsvd2:7;
+};
+
+struct dword3 {
+	uint32_t src_hi:16;
+	uint32_t src_mem:2;
+	uint32_t lsb_cxt_id:8;
+	uint32_t rsvd1:5;
+	uint32_t fixed:1;
+};
+
+union dword4 {
+	uint32_t dst_lo;	/* NON-SHA */
+	uint32_t sha_len_lo;	/* SHA */
+};
+
+union dword5 {
+	struct {
+		uint32_t dst_hi:16;
+		uint32_t dst_mem:2;
+		uint32_t rsvd1:13;
+		uint32_t fixed:1;
+	} fields;
+	uint32_t sha_len_hi;
+};
+
+struct dword7 {
+	uint32_t key_hi:16;
+	uint32_t key_mem:2;
+	uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+	struct dword0 dw0;
+	uint32_t length;
+	uint32_t src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	uint32_t key_lo;
+	struct dword7 dw7;
+};
+
+enum ccp_memtype {
+	CCP_MEMTYPE_SYSTEM = 0,
+	CCP_MEMTYPE_SB,
+	CCP_MEMTYPE_LOCAL,
+	CCP_MEMTYPE__LAST,
+};
+
+/**
+ * cmd id to preserve the chain order of crypto operations
+ */
+enum ccp_cmd_order {
+	CCP_CMD_CIPHER = 0,
+	CCP_CMD_AUTH,
+	CCP_CMD_CIPHER_HASH,
+	CCP_CMD_HASH_CIPHER,
+	CCP_CMD_COMBINED,
+	CCP_CMD_NOT_SUPPORTED,
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+	return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+	return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
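+
+/*
+ * Note: high32_value keeps only 16 bits by design; the v5 descriptor
+ * carries 48-bit addresses (see the 16-bit src_hi/dst_hi/key_hi
+ * bitfields above).
+ */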
+
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+/**
+ * allocate a ccp command queue
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of slots required
+ * @return allotted CCP queue on success otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
+/**
+ * read hwrng value
+ *
+ * @param trng_value data pointer to write RNG value
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 0000000..838627d
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,328 @@
+/*-
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * 	* Redistributions of source code must retain the above copyright
+ * 	notice, this list of conditions and the following disclaimer.
+ * 	* Redistributions in binary form must reproduce the above copyright
+ * 	notice, this list of conditions and the following disclaimer in the
+ * 	documentation and/or other materials provided with the distribution.
+ * 	* Neither the name of the copyright holder nor the names of its
+ * 	contributors may be used to endorse or promote products derived from
+ * 	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include <ccp_pci.h>
+
+static const char * const uio_module_names[] = {
+	"igb_uio",
+	"uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+	FILE *fp;
+	int i;
+	char buf[BUFSIZ];
+
+	fp = fopen(PROC_MODULES, "r");
+	if (fp == NULL)
+		return -1;
+	for (i = 0; i < (int)(sizeof(uio_module_names) /
+			      sizeof(uio_module_names[0])); i++) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+				     strlen(uio_module_names[i]))) {
+				fclose(fp);
+				return i;
+			}
+		}
+		rewind(fp);
+	}
+	fclose(fp);
+	printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
+	return -1; /* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts,
+ * e.g. "0000:02:00.2" -> domain 0x0, bus 0x2, devid 0x0, function 2.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			  uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+	/* first split on ':' */
+	union splitaddr {
+		struct {
+			char *domain;
+			char *bus;
+			char *devid;
+			char *function;
+		};
+		char *str[PCI_FMT_NVAL];
+		/* last element-separator is "." not ":" */
+	} splitaddr;
+
+	char *buf_copy = strndup(buf, bufsize);
+
+	if (buf_copy == NULL)
+		return -1;
+
+	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+			!= PCI_FMT_NVAL - 1)
+		goto error;
+	/* final split is on '.' between devid and function */
+	splitaddr.function = strchr(splitaddr.devid, '.');
+	if (splitaddr.function == NULL)
+		goto error;
+	*splitaddr.function++ = '\0';
+
+	/* now convert to int values */
+	errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+	*bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+	*devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+	*function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+	if (errno != 0)
+		goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+	return 0;
+error:
+	free(buf_copy);
+	return -1;
+}
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+	FILE *f;
+	char buf[BUFSIZ];
+	char *end = NULL;
+
+	f = fopen(filename, "r");
+	if (f == NULL)
+		return -1;
+	if (fgets(buf, sizeof(buf), f) == NULL) {
+		fclose(f);
+		return -1;
+	}
+	*val = strtoul(buf, &end, 0);
+	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+		fclose(f);
+		return -1;
+	}
+	fclose(f);
+	return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO         0x00000100
+#define IORESOURCE_MEM        0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified); each line holds three hex fields:
+ * "<phys_addr> <end_addr> <flags>"
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+				 uint64_t *end_addr, uint64_t *flags)
+{
+	union pci_resource_info {
+		struct {
+			char *phys_addr;
+			char *end_addr;
+			char *flags;
+		};
+		char *ptrs[PCI_RESOURCE_FMT_NVAL];
+	} res_info;
+
+	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+		return -1;
+	errno = 0;
+	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+	*end_addr = strtoull(res_info.end_addr, NULL, 16);
+	*flags = strtoull(res_info.flags, NULL, 16);
+	if (errno != 0)
+		return -1;
+
+	return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+	FILE *fp;
+	char buf[BUFSIZ];
+	int i;
+	uint64_t phys_addr, end_addr, flags;
+
+	fp = fopen(filename, "r");
+	if (fp == NULL)
+		return -1;
+
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+		if (fgets(buf, sizeof(buf), fp) == NULL)
+			goto error;
+		if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+				&phys_addr, &end_addr, &flags) < 0)
+			goto error;
+
+		if (flags & IORESOURCE_MEM) {
+			dev->mem_resource[i].phys_addr = phys_addr;
+			dev->mem_resource[i].len = end_addr - phys_addr + 1;
+			/* not mapped for now */
+			dev->mem_resource[i].addr = NULL;
+		}
+	}
+	fclose(fp);
+	return 0;
+
+error:
+	fclose(fp);
+	return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+	DIR *dir;
+	struct dirent *e;
+	char dirname_uio[PATH_MAX];
+	unsigned int uio_num;
+	int ret = -1;
+
+	/* depending on kernel version, uio can be located in uio/uioX
+	 * or uio:uioX
+	 */
+	snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+	dir = opendir(dirname_uio);
+	if (dir == NULL) {
+		/* retry with the parent directory; the uio node location
+		 * differs across kernel versions
+		 */
+		dir = opendir(dirname);
+		if (dir == NULL)
+			return -1;
+	}
+
+	/* take the first file starting with "uio" */
+	while ((e = readdir(dir)) != NULL) {
+		/* format could be uio%d ...*/
+		int shortprefix_len = sizeof("uio") - 1;
+		/* ... or uio:uio%d */
+		int longprefix_len = sizeof("uio:uio") - 1;
+		char *endptr;
+
+		if (strncmp(e->d_name, "uio", 3) != 0)
+			continue;
+
+		/* first try uio%d */
+		errno = 0;
+		uio_num = strtoul(e->d_name + shortprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+
+		/* then try uio:uio%d */
+		errno = 0;
+		uio_num = strtoul(e->d_name + longprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+	}
+	closedir(dir);
+	return ret;
+}
+
+#define UIO_NEWID "/sys/bus/pci/drivers/%s/new_id"
+#define UIO_BIND  "/sys/bus/pci/drivers/%s/bind"
+
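+/* Bind the CCP device at @dirname to a UIO driver: unbind any current
+ * driver, register the vendor/device id with the UIO driver's new_id
+ * file, then write the device name to its bind file.
+ */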
+int
+ccp_bind_uio(int idx, const char *dirname, struct rte_pci_id *id)
+{
+	FILE *fp;
+	int cnt;
+	char path[PATH_MAX];
+	char *name = NULL;
+	char buf[BUFSIZ];
+	char driver_name[PATH_MAX];
+	char filename[PATH_MAX];
+
+	snprintf(filename, sizeof(filename), "%s/driver", dirname);
+	cnt = readlink(filename, path, PATH_MAX);
+	if (cnt >= PATH_MAX)
+		return -1;
+	if (cnt >= 0) {
+		path[cnt] = '\0';
+		name = strrchr(path, '/');
+		if (name) {
+			snprintf(driver_name, sizeof(driver_name), "%s",
+				 name + 1);
+			if (!strcmp(driver_name, uio_module_names[0]) ||
+			    !strcmp(driver_name, uio_module_names[1]))
+				return 0; /* Already bound */
+			snprintf(filename, sizeof(filename), "%s/driver/unbind",
+				 dirname);
+			fp = fopen(filename, "w");
+			if (fp != NULL) {
+				name = strrchr(dirname, '/');
+				name += 1;
+				if (fwrite(name, strlen(name), 1, fp) == 0) {
+					fclose(fp);
+					return -1; /* Failed to unbind */
+				}
+				fclose(fp);
+			}
+		}
+	}
+	snprintf(filename, sizeof(filename), UIO_NEWID, uio_module_names[idx]);
+	snprintf(buf, sizeof(buf), "%x %x", id->vendor_id, id->device_id);
+	fp = fopen(filename, "w");
+	if (fp != NULL) {
+		if (fwrite(buf, strlen(buf), 1, fp) == 0) {
+			fclose(fp);
+			return -1; /* Failed to bind */
+		}
+		fclose(fp);
+	} else
+		return -1; /* Failed to bind */
+	snprintf(filename, sizeof(filename), UIO_BIND, uio_module_names[idx]);
+	name = strrchr(dirname, '/');
+	name += 1;
+	fp = fopen(filename, "w");
+	if (fp != NULL) {
+		if (fwrite(name, strlen(name), 1, fp) == 0) {
+			fclose(fp);
+			return -1; /* Failed to bind */
+		}
+		fclose(fp);
+		return 0; /* Bind succeeded */
+	}
+	return -1; /* Failed to bind */
+}
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 0000000..a0edf75
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,55 @@
+/*-
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * 	* Redistributions of source code must retain the above copyright
+ * 	notice, this list of conditions and the following disclaimer.
+ * 	* Redistributions in binary form must reproduce the above copyright
+ * 	notice, this list of conditions and the following disclaimer in the
+ * 	documentation and/or other materials provided with the distribution.
+ * 	* Neither the name of the copyright holder nor the names of its
+ * 	contributors may be used to endorse or promote products derived from
+ * 	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			      uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+				 struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+int ccp_bind_uio(int idx, const char *dirname, struct rte_pci_id *id);
+
+#endif /* _CCP_PCI_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 0000000..02080a5
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,645 @@
+/*-
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * 	* Redistributions of source code must retain the above copyright
+ * 	notice, this list of conditions and the following disclaimer.
+ * 	* Redistributions in binary form must reproduce the above copyright
+ * 	notice, this list of conditions and the following disclaimer in the
+ * 	documentation and/or other materials provided with the distribution.
+ * 	* Neither the name of the copyright holder nor the names of its
+ * 	contributors may be used to endorse or promote products derived from
+ * 	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include <ccp_pmd_private.h>
+#include <ccp_dev.h>
+#include <ccp_crypto.h>
+
+static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{	/* SHA1 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{       /* AES ECB */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_ECB,
+				.block_size = 16,
+				.key_size = {
+				   .min = 16,
+				   .max = 32,
+				   .increment = 8
+				},
+				.iv_size = {
+				   .min = 0,
+				   .max = 0,
+				   .increment = 0
+				}
+			}, }
+		}, }
+	},
+	{       /* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 24,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{       /* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				 .algo = RTE_CRYPTO_AEAD_AES_GCM,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = {
+					 .min = 0,
+					 .max = 65535,
+					 .increment = 1
+				 },
+				 .iv_size = {
+					 .min = 12,
+					 .max = 16,
+					 .increment = 4
+				 },
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+	       struct rte_cryptodev_config *config __rte_unused)
+{
+	return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+	return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+	return 0;
+}
+
+static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+		  struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+		 struct rte_cryptodev_info *dev_info)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = ccp_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
+
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		rte_free(dev->data->queue_pairs[qp_id]);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct ccp_qp *qp)
+{
+	unsigned int n = snprintf(qp->name, sizeof(qp->name),
+			"ccp_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
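+/* Create the per-qp ring that carries batch info for processed
+ * packets, reusing an existing ring of sufficient size (e.g. after an
+ * application restart).
+ */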
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+				  unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->size >= ring_size) {
+			CCP_LOG_INFO(
+				"Reusing ring %s for processed packets",
+				qp->name);
+			return r;
+		}
+		CCP_LOG_INFO(
+			"Unable to reuse ring %s for processed packets",
+			 qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		 const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id, struct rte_mempool *session_pool)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+	struct ccp_qp *qp;
+	int retval = 0;
+
+	if (qp_id >= internals->max_nb_qpairs) {
+		CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+			    qp_id, internals->max_nb_qpairs);
+		return (-EINVAL);
+	}
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		ccp_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL) {
+		CCP_LOG_ERR("Failed to allocate queue pair memory");
+		return (-ENOMEM);
+	}
+
+	qp->dev = dev;
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	retval = ccp_pmd_qp_set_unique_name(dev, qp);
+	if (retval) {
+		CCP_LOG_ERR("Failed to create unique name for ccp qp");
+		goto qp_setup_cleanup;
+	}
+
+	qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL) {
+		CCP_LOG_ERR("Failed to create batch info ring");
+		goto qp_setup_cleanup;
+	}
+
+	qp->sess_mp = session_pool;
+
+	/* mempool for batch info*/
+	qp->batch_mp = rte_mempool_create(
+				qp->name,
+				qp_conf->nb_descriptors,
+				sizeof(struct ccp_batch_info),
+				RTE_CACHE_LINE_SIZE,
+				0, NULL, NULL, NULL, NULL,
+				SOCKET_ID_ANY, 0);
+	if (qp->batch_mp == NULL)
+		goto qp_setup_cleanup;
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	return 0;
+
+qp_setup_cleanup:
+	dev->data->queue_pairs[qp_id] = NULL;
+	if (qp)
+		rte_free(qp);
+	return -1;
+}
+
+static int
+ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
+		 uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static int
+ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
+		uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+static unsigned
+ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct ccp_session);
+}
+
+static int
+ccp_pmd_session_configure(struct rte_cryptodev *dev,
+			  struct rte_crypto_sym_xform *xform,
+			  struct rte_cryptodev_sym_session *sess,
+			  struct rte_mempool *mempool)
+{
+	int ret;
+	void *sess_private_data;
+
+	if (unlikely(sess == NULL || xform == NULL)) {
+		CCP_LOG_ERR("Invalid session struct or xform");
+		return -ENOMEM;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CCP_LOG_ERR("Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+	ret = ccp_set_session_parameters(sess_private_data, xform);
+	if (ret != 0) {
+		CCP_LOG_ERR("failed configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+	set_session_private_data(sess, dev->driver_id,
+				 sess_private_data);
+
+	return 0;
+}
+
+static void
+ccp_pmd_session_clear(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_session_private_data(sess, index);
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		rte_mempool_put(sess_mp, sess_priv);
+		memset(sess_priv, 0, sizeof(struct ccp_session));
+		set_session_private_data(sess, index, NULL);
+	}
+}
+
+struct rte_cryptodev_ops ccp_ops = {
+		.dev_configure		= ccp_pmd_config,
+		.dev_start		= ccp_pmd_start,
+		.dev_stop		= ccp_pmd_stop,
+		.dev_close		= ccp_pmd_close,
+
+		.stats_get		= ccp_pmd_stats_get,
+		.stats_reset		= ccp_pmd_stats_reset,
+
+		.dev_infos_get		= ccp_pmd_info_get,
+
+		.queue_pair_setup	= ccp_pmd_qp_setup,
+		.queue_pair_release	= ccp_pmd_qp_release,
+		.queue_pair_start	= ccp_pmd_qp_start,
+		.queue_pair_stop	= ccp_pmd_qp_stop,
+		.queue_pair_count	= ccp_pmd_qp_count,
+
+		.session_get_size	= ccp_pmd_session_get_size,
+		.session_configure	= ccp_pmd_session_configure,
+		.session_clear		= ccp_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 0000000..95433d7
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,128 @@
+/*-
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * 	* Redistributions of source code must retain the above copyright
+ * 	notice, this list of conditions and the following disclaimer.
+ * 	* Redistributions in binary form must reproduce the above copyright
+ * 	notice, this list of conditions and the following disclaimer in the
+ * 	documentation and/or other materials provided with the distribution.
+ * 	* Neither the name of the copyright holder nor the names of its
+ * 	contributors may be used to endorse or promote products derived from
+ * 	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_config.h>
+#include <rte_cryptodev.h>
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/** Maximum queue pairs supported by CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS	1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+#include <ccp_dev.h>
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
+	unsigned int max_nb_sessions;	/**< Max number of sessions */
+	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+	struct ccp_device *last_dev;	/**< Last working crypto device */
+};
+
+/* CCP batch info */
+struct ccp_batch_info {
+	struct rte_crypto_op *op[CCP_MAX_BURST];
+	/**< op table populated at enqueue time from the app */
+	int op_idx;
+	struct ccp_queue *cmd_q;
+	uint16_t opcnt;
+	/**< number of crypto ops in the batch */
+	int desccnt;
+	/**< number of CCP queue descriptors consumed */
+	uint32_t head_offset;
+	/**< CCP queue head offset at enqueue time */
+	uint32_t tail_offset;
+	/**< CCP queue tail offset at enqueue time */
+	uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+	phys_addr_t lsb_buf_phys;
+	/**< LSB intermediate buf for passthru */
+	int lsb_buf_idx;
+} __rte_cache_aligned;
+
+/** CCP crypto queue pair */
+struct ccp_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_mempool *batch_mp;
+	/**< Session Mempool for batch info */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+	struct ccp_batch_info *b_info;
+	/**< Store ops pulled out of queue */
+	struct rte_cryptodev *dev;
+	/**< rte crypto device to which this qp belongs */
+} __rte_cache_aligned;
+
+/** Device-specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+extern struct rte_cryptodev_ops *ccp_cpu_pmd_ops;
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 0000000..9410470
--- /dev/null
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,281 @@
+/*-
+ *   Copyright(c) 2017 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ * 	* Redistributions of source code must retain the above copyright
+ * 	notice, this list of conditions and the following disclaimer.
+ * 	* Redistributions in binary form must reproduce the above copyright
+ * 	notice, this list of conditions and the following disclaimer in the
+ * 	documentation and/or other materials provided with the distribution.
+ * 	* Neither the name of the copyright holder nor the names of its
+ * 	contributors may be used to endorse or promote products derived from
+ * 	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+
+#include <ccp_crypto.h>
+#include <ccp_dev.h>
+#include <ccp_pmd_private.h>
+
+/**
+ * Global flag recording whether the CCP vdev has already been created.
+ */
+static unsigned int ccp_pmd_init_done;
+uint8_t cryptodev_driver_id;
+
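+/* Fetch the CCP session attached to an op; for session-less ops,
+ * allocate one from the queue pair's session mempool and configure it
+ * from the op's xform chain.
+ */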
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
+{
+	struct ccp_session *sess = NULL;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (unlikely(op->sym->session == NULL))
+			return NULL;
+
+		sess = (struct ccp_session *)
+			get_session_private_data(
+				op->sym->session,
+				cryptodev_driver_id);
+	} else {
+		void *_sess;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, &_sess))
+			return NULL;
+		if (rte_mempool_get(qp->sess_mp,
+				    (void **)&_sess_private_data)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+		sess = (struct ccp_session *)_sess_private_data;
+
+		if (unlikely(ccp_set_session_parameters(sess,
+							op->sym->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
+			sess = NULL;
+		}
+		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_session_private_data(op->sym->session, cryptodev_driver_id,
+					 _sess_private_data);
+	}
+
+	return sess;
+}
+
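+/* Enqueue burst: add up the CCP descriptor slots each op needs, pick a
+ * hardware queue with enough free slots and submit the whole batch.
+ */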
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		      uint16_t nb_ops)
+{
+	struct ccp_session *sess = NULL;
+	struct ccp_qp *qp = queue_pair;
+	struct ccp_queue *cmd_q;
+	struct rte_cryptodev *dev = qp->dev;
+	int i, enq_cnt, slots_req = 0;
+
+	if (nb_ops == 0)
+		return 0;
+
+	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+		return 0;
+
+	for (i = 0; i < nb_ops; i++) {
+		sess = get_ccp_session(qp, ops[i]);
+		if (unlikely(sess == NULL) && (i == 0)) {
+			qp->qp_stats.enqueue_err_count++;
+			return 0;
+		} else if (sess == NULL) {
+			nb_ops = i;
+			break;
+		}
+		slots_req += ccp_compute_slot_count(sess);
+	}
+
+	cmd_q = ccp_allot_queue(dev, slots_req);
+	if (unlikely(cmd_q == NULL))
+		return 0;
+
+	enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+	qp->qp_stats.enqueued_count += enq_cnt;
+	return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct ccp_qp *qp = queue_pair;
+	unsigned int nb_dequeued, i;
+
+	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+	/* Free session if a session-less crypto op. */
+	for (i = 0; i < nb_dequeued; i++)
+		if (unlikely(ops[i]->sess_type ==
+			     RTE_CRYPTO_OP_SESSIONLESS)) {
+			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+			ops[i]->sym->session = NULL;
+		}
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
+	return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+	},
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+	},
+	{.device_id = 0},
+};
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
+{
+	const char *name;
+
+	ccp_pmd_init_done = 0;
+	name = rte_vdev_device_name(dev);
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+			name, rte_socket_id());
+
+	return 0;
+}
+
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+		     struct rte_vdev_device *vdev,
+		     struct rte_crypto_vdev_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct ccp_private *internals;
+	uint8_t cryptodev_cnt = 0;
+
+	if (init_params->name[0] == '\0')
+		snprintf(init_params->name, sizeof(init_params->name),
+				"%s", name);
+
+	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+			sizeof(struct ccp_private),
+			init_params->socket_id,
+			vdev);
+	if (dev == NULL) {
+		CCP_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+	if (cryptodev_cnt == 0) {
+		CCP_LOG_ERR("failed to detect CCP crypto device");
+		goto init_error;
+	}
+
+	RTE_LOG(INFO, PMD, "CCP: crypto device count = %d\n", cryptodev_cnt);
+	dev->driver_id = cryptodev_driver_id;
+
+	/* register rx/tx burst functions for data path */
+	dev->dev_ops = ccp_pmd_ops;
+	dev->enqueue_burst = ccp_pmd_enqueue_burst;
+	dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+	internals = dev->data->dev_private;
+
+	internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+	internals->crypto_num_dev = cryptodev_cnt;
+
+	return 0;
+
+init_error:
+	CCP_LOG_ERR("driver %s: cryptodev_ccp_create failed",
+		    init_params->name);
+	cryptodev_ccp_remove(vdev);
+
+	return -EFAULT;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
+{
+	int rc = 0;
+	const char *name;
+	struct rte_crypto_vdev_init_params init_params = {
+		CCP_PMD_MAX_QUEUE_PAIRS,
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+		rte_socket_id(),
+		{0}
+	};
+	const char *input_args;
+
+	if (ccp_pmd_init_done) {
+		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+		return -EFAULT;
+	}
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	input_args = rte_vdev_device_args(vdev);
+	rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
+	init_params.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	rc = cryptodev_ccp_create(name, vdev, &init_params);
+	if (rc)
+		return rc;
+	ccp_pmd_init_done = 1;
+	return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+	.probe = cryptodev_ccp_probe,
+	.remove = cryptodev_ccp_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+	"max_nb_queue_pairs=<int> max_nb_sessions=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_ccp_pmd_drv, cryptodev_driver_id);
diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 0000000..a70bd19
--- /dev/null
+++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,4 @@
+DPDK_17.11 {
+
+	local: *;
+};
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
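
For reference, once this patch series is applied the PMD can be
exercised by enabling it in the build config and attaching it as a
vdev. The parameter names come from the RTE_PMD_REGISTER_PARAM_STRING
entry above; the application and its flags below are only illustrative:

  # config/common_base
  CONFIG_RTE_LIBRTE_PMD_CCP=y

  # attach the vdev at EAL init, e.g. with the l2fwd-crypto example
  ./l2fwd-crypto -l 1 -n 4 \
      --vdev "crypto_ccp,max_nb_queue_pairs=1,max_nb_sessions=2048,socket_id=0" \
      -- --chain CIPHER_HASH --cipher_algo aes-cbc --auth_algo sha1-hmac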

* [PATCH 03/11] crypto: add macros for AES-CMAC and SHA3
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
  2017-11-30 13:12 ` [PATCH 02/11] crypto/ccp: add " Ravi Kumar
@ 2017-11-30 13:12 ` Ravi Kumar
  2017-11-30 13:12 ` [PATCH 04/11] crypto/ccp: add support for AES-CMAC Ravi Kumar
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2017-11-30 13:12 UTC (permalink / raw)
  To: dev

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 doc/guides/cryptodevs/features/default.ini | 12 ++++++++++++
 lib/librte_cryptodev/rte_crypto_sym.h      | 18 ++++++++++++++++++
 2 files changed, 30 insertions(+)

diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 18d66cb..cc2cec4 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -27,6 +27,9 @@ NULL           =
 AES CBC (128)  =
 AES CBC (192)  =
 AES CBC (256)  =
+AES ECB (128)  =
+AES ECB (192)  =
+AES ECB (256)  =
 AES CTR (128)  =
 AES CTR (192)  =
 AES CTR (256)  =
@@ -61,6 +64,15 @@ AES GMAC     =
 SNOW3G UIA2  =
 KASUMI F9    =
 ZUC EIA3     =
+AES CMAC     =
+SHA3_224     =
+SHA3_224 HMAC=
+SHA3_256     =
+SHA3_256 HMAC=
+SHA3_384     =
+SHA3_384 HMAC=
+SHA3_512     =
+SHA3_512 HMAC=
 
 ;
 ; Supported AEAD algorithms of a default crypto driver.
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index c981f0b..03e154f 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -273,6 +273,24 @@ enum rte_crypto_auth_algorithm {
 	RTE_CRYPTO_AUTH_ZUC_EIA3,
 	/**< ZUC algorithm in EIA3 mode */
 
+	/**< SHA3 algorithm support*/
+	RTE_CRYPTO_AUTH_SHA3_224,
+	/**< 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	/**< HMAC using 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256,
+	/**< 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	/**< HMAC using 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384,
+	/**< 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	/**< HMAC using 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512,
+	/**< 512 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+	/**< HMAC using 512 bit SHA3 algorithm. */
+
 	RTE_CRYPTO_AUTH_LIST_END
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
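
The new enum values slot into the existing symmetric xform API without
further changes. As a minimal sketch (the session-creation calls around
it follow the usual cryptodev flow and are omitted), an application
would request plain SHA3-256 authentication like this:

  struct rte_crypto_sym_xform auth_xform = {
  	.next = NULL,
  	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
  	.auth = {
  		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
  		.algo = RTE_CRYPTO_AUTH_SHA3_256, /* added by this patch */
  		.digest_length = 32, /* SHA3-256 digest size in bytes */
  		.key = { .data = NULL, .length = 0 }, /* keyless hash */
  	},
  };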

* [PATCH 04/11] crypto/ccp: add support for AES-CMAC
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
  2017-11-30 13:12 ` [PATCH 02/11] crypto/ccp: add " Ravi Kumar
  2017-11-30 13:12 ` [PATCH 03/11] crypto: add macros for AES-CMAC and SHA3 Ravi Kumar
@ 2017-11-30 13:12 ` Ravi Kumar
  2017-11-30 13:12 ` [PATCH 05/11] crypto/ccp: add support for CPU based authentication Ravi Kumar
                   ` (8 subsequent siblings)
  11 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2017-11-30 13:12 UTC (permalink / raw)
  To: dev

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 272 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |   3 +
 drivers/crypto/ccp/ccp_dev.c     |   1 +
 drivers/crypto/ccp/ccp_pmd_ops.c |  21 +++
 4 files changed, 297 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 68e1169..4d71ec1 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -51,6 +51,7 @@
 #include <ccp_pmd_private.h>
 
 #include <openssl/sha.h> /*partial hash apis*/
+#include <openssl/cmac.h> /*sub key apis*/
 #include <openssl/evp.h> /*sub key apis*/
 
 /* SHA initial context values */
@@ -276,6 +277,84 @@ static int generate_partial_hash(struct ccp_session *sess)
 	}
 }
 
+/* Derive one CMAC subkey: left-shift the 'l' block by one bit and, if
+ * its MSB was set, XOR the last byte with the Rb constant from NIST SP
+ * 800-38B (0x87 for 16-byte blocks, 0x1b for 8-byte blocks).
+ */
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+	int i;
+	/* Shift block to left, including carry */
+	for (i = 0; i < bl; i++) {
+		k[i] = l[i] << 1;
+		if (i < bl - 1 && l[i + 1] & 0x80)
+			k[i] |= 1;
+	}
+	/* If MSB set fixup with R */
+	if (l[0] & 0x80)
+		k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/** Generate the CMAC subkeys K1 and K2 */
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+	const EVP_CIPHER *algo;
+	EVP_CIPHER_CTX *ctx;
+	unsigned char *ccp_ctx;
+	size_t i;
+	int dstlen, totlen;
+	unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+	unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+	unsigned char k1[AES_BLOCK_SIZE] = {0};
+	unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+	if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+		algo =  EVP_aes_128_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+		algo =  EVP_aes_192_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+		algo =  EVP_aes_256_cbc();
+	else {
+		CCP_LOG_ERR("Invalid CMAC type length");
+		return -1;
+	}
+
+	ctx = EVP_CIPHER_CTX_new();
+	if (!ctx) {
+		CCP_LOG_ERR("ctx creation failed");
+		return -1;
+	}
+	if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+			    (unsigned char *)zero_iv) <= 0)
+		goto key_generate_err;
+	if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+			      AES_BLOCK_SIZE) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+		goto key_generate_err;
+
+	memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+	prepare_key(k1, dst, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k1[i];
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+				   (2 * CCP_SB_BYTES) - 1);
+	prepare_key(k2, k1, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k2[i];
+
+	EVP_CIPHER_CTX_free(ctx);
+
+	return 0;
+
+key_generate_err:
+	CCP_LOG_ERR("CMAC subkey generation failed");
+	EVP_CIPHER_CTX_free(ctx);
+	return -1;
+}
+
 /**configure session*/
 static int
 ccp_configure_session_cipher(struct ccp_session *sess,
@@ -373,6 +452,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_auth_xform *auth_xform = NULL;
+	size_t i;
 
 	auth_xform = &xform->auth;
 
@@ -507,6 +587,33 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+		sess->auth.key_length = auth_xform->key.length;
+		/* space for padding and the hash result */
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = AES_BLOCK_SIZE;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		if (sess->auth.key_length == 16)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->auth.key_length == 24)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->auth.key_length == 32)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid CMAC key length");
+			return -1;
+		}
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   sess->auth.key_length);
+		for (i = 0; i < sess->auth.key_length; i++)
+			sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+				sess->auth.key[i];
+		if (generate_cmac_subkeys(sess))
+			return -1;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported hash algo");
 		return -1;
@@ -697,6 +804,15 @@ ccp_auth_slot(struct ccp_session *session)
 		 * 6. Retrieve HMAC output from LSB to host memory
 		 */
 		break;
+	case CCP_AUTH_ALGO_AES_CMAC:
+		count = 4;
+		/**
+		 * 1. Passthru to load K1/K2 (255:128) with IV (127:0)
+		 * 2. AES-CMAC op (one extra descriptor in the padding
+		 *    case)
+		 * 3. Passthru to retrieve the result
+		 */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported ALGO %d", session->cipher.algo);
 	}
@@ -1077,6 +1193,158 @@ ccp_perform_sha(struct rte_crypto_op *op,
 }
 
 static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *src_tb, *append_ptr, *ctx_addr;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	int length, non_align_len;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+	CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
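+	/* Block-aligned data is hashed with a single AES-CMAC command
+	 * using the K1 subkey; otherwise the aligned prefix is processed
+	 * first and the trailing partial block is padded with
+	 * CMAC_PAD_VALUE (0x80) and finalized with K2, per NIST SP
+	 * 800-38B.
+	 */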
+	if (op->sym->auth.data.length % session->auth.block_size == 0) {
+
+		ctx_addr = session->auth.pre_compute;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		/* prepare desc for aes-cmac command */
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	} else {
+		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+		length *= AES_BLOCK_SIZE;
+		non_align_len = op->sym->auth.data.length - length;
+		/* prepare desc for aes-cmac command */
+		/*Command 1*/
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_INIT(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		/*Command 2*/
+		append_ptr = append_ptr + CCP_SB_BYTES;
+		memset(append_ptr, 0, AES_BLOCK_SIZE);
+		src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+						 uint8_t *,
+						 op->sym->auth.data.offset +
+						 length);
+		rte_memcpy(append_ptr, src_tb, non_align_len);
+		append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+		CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+		CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	}
+	/* Retrieve result */
+	pst.dest_addr = dest_addr;
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
 ccp_perform_aes(struct rte_crypto_op *op,
 		struct ccp_queue *cmd_q,
 		struct ccp_batch_info *b_info)
@@ -1513,6 +1781,10 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 7;
 		break;
+	case CCP_AUTH_ALGO_AES_CMAC:
+		result = ccp_perform_aes_cmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported Cipher algo");
 		result = -1;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 675b5ae..21cc99f 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -47,6 +47,7 @@
 #include <ccp_dev.h>
 
 #define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
 #define CTR_NONCE_SIZE 4
 #define CTR_IV_SIZE 8
 #define CCP_SHA_CTX_SIZE 200
@@ -233,6 +234,7 @@ enum ccp_hash_algo {
 	CCP_AUTH_ALGO_SHA384_HMAC,
 	CCP_AUTH_ALGO_SHA512,
 	CCP_AUTH_ALGO_SHA512_HMAC,
+	CCP_AUTH_ALGO_AES_CMAC,
 	CCP_AUTH_ALGO_AES_GCM,
 };
 
@@ -301,6 +303,7 @@ struct ccp_session {
 		int block_size;
 		/**<Buffer to store  Software generated precomute values*/
 		/**< For HMAC H(ipad ^ key) and H(opad ^ key) */
+	/**< For CMAC: K1 IV and K2 IV */
 		uint8_t pre_compute[2 * CCP_SHA_CTX_SIZE];
 		int aad_length;
 	} auth;
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index bc26020..af4d2b0 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -50,6 +50,7 @@
 #include <ccp_pmd_private.h>
 
 #include <openssl/sha.h> /*partial hash apis*/
+#include <openssl/cmac.h> /*sub key apis*/
 #include <openssl/evp.h> /*sub key apis*/
 
 struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 02080a5..3a5e03c 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -249,6 +249,27 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/*AES-CMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
 	{       /* AES ECB */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
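
Because the subkeys are already derived in software through OpenSSL, a
convenient bring-up check is to compare the PMD's AES-CMAC digests
against OpenSSL's own CMAC implementation (the same library this patch
links for subkey generation). A minimal reference sketch, assuming the
key and message buffers come from the test harness:

  #include <stddef.h>
  #include <stdint.h>
  #include <openssl/evp.h>
  #include <openssl/cmac.h>

  /* Compute an AES-128-CMAC reference tag; 0 on success, -1 on error. */
  static int
  ref_aes_cmac(const uint8_t *key, const uint8_t *msg, size_t len,
  	     uint8_t mac[16])
  {
  	size_t maclen = 0;
  	CMAC_CTX *ctx = CMAC_CTX_new();

  	if (ctx == NULL)
  		return -1;
  	if (CMAC_Init(ctx, key, 16, EVP_aes_128_cbc(), NULL) != 1 ||
  	    CMAC_Update(ctx, msg, len) != 1 ||
  	    CMAC_Final(ctx, mac, &maclen) != 1) {
  		CMAC_CTX_free(ctx);
  		return -1;
  	}
  	CMAC_CTX_free(ctx);
  	return maclen == 16 ? 0 : -1;
  }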

* [PATCH 05/11] crypto/ccp: add support for CPU based authentication
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
                   ` (2 preceding siblings ...)
  2017-11-30 13:12 ` [PATCH 04/11] crypto/ccp: add support for AES-CMAC Ravi Kumar
@ 2017-11-30 13:12 ` Ravi Kumar
  2017-12-11 19:40   ` De Lara Guarch, Pablo
  2017-11-30 13:12 ` [PATCH 06/11] crypto/ccp: add support for SHA3 family authentication Ravi Kumar
                   ` (7 subsequent siblings)
  11 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2017-11-30 13:12 UTC (permalink / raw)
  To: dev

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 config/common_base                   |   1 +
 drivers/crypto/ccp/ccp_crypto.c      | 264 +++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h      |   8 ++
 drivers/crypto/ccp/ccp_pmd_ops.c     |  23 +++
 drivers/crypto/ccp/ccp_pmd_private.h |   4 +
 5 files changed, 300 insertions(+)

diff --git a/config/common_base b/config/common_base
index 88826c8..2974581 100644
--- a/config/common_base
+++ b/config/common_base
@@ -560,6 +560,7 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 # Compile PMD for AMD CCP crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_CCP=n
+CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
 
 #
 # Compile PMD for Marvell Crypto device
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 4d71ec1..1833929 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -54,6 +54,13 @@
 #include <openssl/cmac.h> /*sub key apis*/
 #include <openssl/evp.h> /*sub key apis*/
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+#endif
+
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 /* SHA initial context values */
 static uint32_t ccp_sha1_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
 	SHA1_H4, SHA1_H3,
@@ -89,6 +96,7 @@ uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
 	SHA512_H3, SHA512_H2,
 	SHA512_H1, SHA512_H0,
 };
+#endif
 
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
@@ -114,6 +122,7 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 	return res;
 }
 
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 /**partial hash using openssl*/
 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
 {
@@ -354,6 +363,7 @@ generate_cmac_subkeys(struct ccp_session *sess)
 	CCP_LOG_ERR("CMAC Init failed");
 		return -1;
 }
+#endif
 
 /**configure session*/
 static int
@@ -452,7 +462,9 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_auth_xform *auth_xform = NULL;
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 	size_t i;
+#endif
 
 	auth_xform = &xform->auth;
 
@@ -461,6 +473,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.op = CCP_AUTH_OP_GENERATE;
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 	switch (auth_xform->algo) {
 	case RTE_CRYPTO_AUTH_SHA1:
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -618,6 +631,77 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		CCP_LOG_ERR("Unsupported hash algo");
 		return -1;
 	}
+#else
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - MD5_DIGEST_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		sess->auth.block_size = MD5_BLOCK_SIZE;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported hash algo");
+		return -1;
+	}
+#endif
 	return 0;
 }
 
@@ -860,12 +944,16 @@ ccp_compute_slot_count(struct ccp_session *session)
 		count = ccp_cipher_slot(session);
 		break;
 	case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 		count = ccp_auth_slot(session);
+#endif
 		break;
 	case CCP_CMD_CIPHER_HASH:
 	case CCP_CMD_HASH_CIPHER:
 		count = ccp_cipher_slot(session);
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 		count += ccp_auth_slot(session);
+#endif
 		break;
 	case CCP_CMD_COMBINED:
 		count = ccp_combined_mode_slot(session);
@@ -878,6 +966,123 @@ ccp_compute_slot_count(struct ccp_session *session)
 	return count;
 }
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+static uint8_t
+algo_select(int sessalgo,
+	    const EVP_MD **algo)
+{
+	int res = 0;
+
+	switch (sessalgo) {
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		*algo = EVP_md5();
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		*algo = EVP_sha1();
+		break;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		*algo = EVP_sha224();
+		break;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		*algo = EVP_sha256();
+		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		*algo = EVP_sha384();
+		break;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		*algo = EVP_sha512();
+		break;
+	default:
+		res = -EINVAL;
+		break;
+	}
+	return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+		      __rte_unused uint8_t *iv,
+		      EVP_PKEY *pkey,
+		      int srclen,
+		      EVP_MD_CTX *ctx,
+		      const EVP_MD *algo,
+		      uint16_t d_len)
+{
+	size_t dstlen = sizeof(temp_dst);
+	unsigned char temp_dst[64];
+
+	if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+		goto process_auth_err;
+
+	rte_memcpy(dst, temp_dst, d_len);
+	return 0;
+process_auth_err:
+	CCP_LOG_ERR("Process cpu auth failed");
+	return -EINVAL;
+}
+
+static int cpu_crypto_auth(struct rte_crypto_op *op, struct ccp_session *sess,
+			   EVP_MD_CTX *ctx)
+{
+	uint8_t *src, *dst;
+	int srclen, status;
+	struct rte_mbuf *mbuf_src, *mbuf_dst;
+	const EVP_MD *algo = NULL;
+	EVP_PKEY *pkey;
+
+	algo_select(sess->auth.algo, &algo);
+	pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+				    sess->auth.key_length);
+	mbuf_src = op->sym->m_src;
+	mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+	srclen = op->sym->auth.data.length;
+	src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+				      op->sym->auth.data.offset);
+
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+		dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
+						    sess->auth.digest_length);
+	} else {
+		dst = op->sym->auth.digest.data;
+		if (dst == NULL) {
+			dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+						     op->sym->auth.data.offset +
+						     sess->auth.digest_length);
+		}
+	}
+	status = process_cpu_auth_hmac(src, dst, NULL,
+				       pkey, srclen,
+				       ctx,
+				       algo,
+				       sess->auth.digest_length);
+	if (status) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		EVP_PKEY_free(pkey);
+		return status;
+	}
+
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(dst, op->sym->auth.digest.data,
+			   sess->auth.digest_length) != 0) {
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+		} else {
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+		rte_pktmbuf_trim(mbuf_src,
+				 sess->auth.digest_length);
+	} else {
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	}
+	EVP_PKEY_free(pkey);
+	return 0;
+}
+#endif
+
 static void
 ccp_perform_passthru(struct ccp_passthru *pst,
 		     struct ccp_queue *cmd_q)
@@ -1831,11 +2036,22 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 	int i, result = 0;
 	struct ccp_batch_info *b_info;
 	struct ccp_session *session;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+#endif
 
 	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
 		CCP_LOG_ERR("batch info allocation failed");
 		return 0;
 	}
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		return 0;
+	}
+	b_info->auth_ctr = 0;
+#endif
 	/* populate batch info necessary for dequeue */
 	b_info->op_idx = 0;
 	b_info->lsb_buf_idx = 0;
@@ -1856,16 +2072,29 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
 			break;
 		case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#else
+			b_info->auth_ctr++;
+			result = cpu_crypto_auth(op[i], session, auth_ctx);
+#endif
 			break;
 		case CCP_CMD_CIPHER_HASH:
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
 			if (result)
 				break;
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#endif
 			break;
 		case CCP_CMD_HASH_CIPHER:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#else
+			result = cpu_crypto_auth(op[i], session, auth_ctx);
+			if (op[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+				continue;
+#endif
 			if (result)
 				break;
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -1899,6 +2128,9 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 
 	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
 	return i;
 }
 
@@ -1974,6 +2206,15 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 	int i, min_ops;
 	struct ccp_session *session;
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		return 0;
+	}
+#endif
 	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
 
 	for (i = 0; i < min_ops; i++) {
@@ -1986,8 +2227,24 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 			break;
 		case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_CIPHER_HASH:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			cpu_crypto_auth(op_d[i], session, auth_ctx);
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_HASH_CIPHER:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_COMBINED:
 			ccp_auth_dq_prepare(op_d[i]);
 			break;
@@ -1996,6 +2253,9 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 		}
 	}
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
 	b_info->opcnt -= min_ops;
 	return min_ops;
 }
@@ -2015,6 +2275,10 @@ process_ops_to_dequeue(struct ccp_qp *qp,
 	} else if (rte_ring_dequeue(qp->processed_pkts,
 				    (void **)&b_info))
 		return 0;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	if (b_info->auth_ctr == b_info->opcnt)
+		goto success;
+#endif
 	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
 				       CMD_Q_HEAD_LO_BASE);
 
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 21cc99f..a350dfd 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -68,6 +68,11 @@
 #define HMAC_IPAD_VALUE 0x36
 #define HMAC_OPAD_VALUE 0x5c
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#define MD5_DIGEST_SIZE         16
+#define MD5_BLOCK_SIZE          64
+#endif
+
 /**SHA */
 #define SHA1_DIGEST_SIZE        20
 #define SHA1_BLOCK_SIZE         64
@@ -236,6 +241,9 @@ enum ccp_hash_algo {
 	CCP_AUTH_ALGO_SHA512_HMAC,
 	CCP_AUTH_ALGO_AES_CMAC,
 	CCP_AUTH_ALGO_AES_GCM,
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	CCP_AUTH_ALGO_MD5_HMAC,
+#endif
 };
 
 /**
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 3a5e03c..e0160b3 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,29 @@
 #include <ccp_crypto.h>
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
+#endif
 	{	/* SHA1 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
index 95433d7..9e288c5 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -88,6 +88,10 @@ struct ccp_batch_info {
 	phys_addr_t lsb_buf_phys;
 	/**< LSB intermediate buf for passthru */
 	int lsb_buf_idx;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	uint16_t auth_ctr;
+	/**< auth only ops batch */
+#endif
 } __rte_cache_aligned;
 
 /**< CCP crypto queue pair */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
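
For readers unfamiliar with the one-shot EVP_DigestSign API, here is a
minimal standalone sketch of the EVP HMAC flow that process_cpu_auth_hmac()
above wraps. It assumes OpenSSL 1.0.2 or later; the key and message values
are hypothetical and the snippet is not part of the patch:

#include <stdio.h>
#include <openssl/evp.h>

int main(void)
{
	const unsigned char key[] = "session-auth-key";	/* hypothetical */
	const unsigned char msg[] = "payload to authenticate";
	unsigned char digest[EVP_MAX_MD_SIZE];
	size_t dlen = sizeof(digest);
	int ret = 1;

	EVP_PKEY *pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL,
					      key, sizeof(key) - 1);
	EVP_MD_CTX *ctx = EVP_MD_CTX_create();

	if (pkey == NULL || ctx == NULL)
		goto out;

	/* same Init/Update/Final sequence as process_cpu_auth_hmac() */
	if (EVP_DigestSignInit(ctx, NULL, EVP_sha256(), NULL, pkey) <= 0)
		goto out;
	if (EVP_DigestSignUpdate(ctx, msg, sizeof(msg) - 1) <= 0)
		goto out;
	if (EVP_DigestSignFinal(ctx, digest, &dlen) <= 0)
		goto out;

	printf("HMAC-SHA256 digest is %zu bytes\n", dlen);
	ret = 0;
out:
	EVP_MD_CTX_destroy(ctx);	/* NULL-safe */
	EVP_PKEY_free(pkey);		/* NULL-safe */
	return ret;
}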

* [PATCH 06/11] crypto/ccp: add support for SHA3 family authentication
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
                   ` (3 preceding siblings ...)
  2017-11-30 13:12 ` [PATCH 05/11] crypto/ccp: add support for CPU based authentication Ravi Kumar
@ 2017-11-30 13:12 ` Ravi Kumar
  2017-11-30 13:12 ` [PATCH 07/11] doc: add document for AMD CCP crypto PMD Ravi Kumar
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2017-11-30 13:12 UTC (permalink / raw)
  To: dev

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 668 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  38 ++-
 drivers/crypto/ccp/ccp_pmd_ops.c | 168 ++++++++++
 3 files changed, 872 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 1833929..1fc6569 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -98,6 +98,74 @@ uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
 };
 #endif
 
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+	(((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+	uint64_t saved;
+	/**
+	 * The portion of the input message that we
+	 * didn't consume yet
+	 */
+	union {
+		uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+		/* Keccak's state */
+		uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+		/**< total 200-byte ctx size */
+	};
+	unsigned int byteIndex;
+	/**
+	 * 0..7--the next byte after the set one
+	 * (starts from 0; 0--none are buffered)
+	 */
+	unsigned int wordIndex;
+	/**
+	 * 0..24--the next word to integrate input
+	 * (starts from 0)
+	 */
+	unsigned int capacityWords;
+	/**
+	 * the double size of the hash output in
+	 * words (e.g. 16 for Keccak 512)
+	 */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+	(((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+	SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+	SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+	SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+	SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+	SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+	SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+	SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+	SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+	SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+	SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+	1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+	18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+	10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+	14, 22, 9, 6, 1
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -182,7 +250,226 @@ static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
 		   SHA512_DIGEST_LENGTH);
 	return 0;
 }
+#endif
+
+static void
+keccakf(uint64_t s[25])
+{
+	int i, j, round;
+	uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+	for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+		/* Theta */
+		for (i = 0; i < 5; i++)
+			bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+				s[i + 20];
+
+		for (i = 0; i < 5; i++) {
+			t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+			for (j = 0; j < 25; j += 5)
+				s[j + i] ^= t;
+		}
+
+		/* Rho Pi */
+		t = s[1];
+		for (i = 0; i < 24; i++) {
+			j = keccakf_piln[i];
+			bc[0] = s[j];
+			s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+			t = bc[0];
+		}
+
+		/* Chi */
+		for (j = 0; j < 25; j += 5) {
+			for (i = 0; i < 5; i++)
+				bc[i] = s[j + i];
+			for (i = 0; i < 5; i++)
+				s[j + i] ^= (~bc[(i + 1) % 5]) &
+					    bc[(i + 2) % 5];
+		}
+
+		/* Iota */
+		s[0] ^= keccakf_rndc[round];
+	}
+}
+
+static void
+sha3_Init224(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+
+/* Absorb message bytes into the Keccak sponge: buffer stray bytes until a
+ * full 64-bit word is available, XOR complete words into the state, and run
+ * keccakf() each time a full rate block has been absorbed.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+	unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+	size_t words;
+	unsigned int tail;
+	size_t i;
+	const uint8_t *buf = bufIn;
+
+	if (len < old_tail) {
+		while (len--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+		return;
+	}
+
+	if (old_tail) {
+		len -= old_tail;
+		while (old_tail--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+
+		ctx->s[ctx->wordIndex] ^= ctx->saved;
+		ctx->byteIndex = 0;
+		ctx->saved = 0;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	words = len / sizeof(uint64_t);
+	tail = len - words * sizeof(uint64_t);
+
+	for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+		const uint64_t t = (uint64_t) (buf[0]) |
+			((uint64_t) (buf[1]) << 8 * 1) |
+			((uint64_t) (buf[2]) << 8 * 2) |
+			((uint64_t) (buf[3]) << 8 * 3) |
+			((uint64_t) (buf[4]) << 8 * 4) |
+			((uint64_t) (buf[5]) << 8 * 5) |
+			((uint64_t) (buf[6]) << 8 * 6) |
+			((uint64_t) (buf[7]) << 8 * 7);
+		ctx->s[ctx->wordIndex] ^= t;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	while (tail--)
+		ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
+
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init224(ctx);
+	sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init256(ctx);
+	sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init384(ctx);
+	sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init512(ctx);
+	sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
 
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 static int generate_partial_hash(struct ccp_session *sess)
 {
 
@@ -192,6 +479,7 @@ static int generate_partial_hash(struct ccp_session *sess)
 	uint32_t *hash_value_be32, hash_temp32[8];
 	uint64_t *hash_value_be64, hash_temp64[8];
 	int i, count;
+	uint8_t *hash_value_sha3;
 
 	opad_t = ipad_t = (uint8_t *)sess->auth.key;
 
@@ -235,6 +523,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_224(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha3_224(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA256_HMAC:
 		count = SHA256_DIGEST_SIZE >> 2;
 
@@ -250,6 +548,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_256(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_256(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 		count = SHA512_DIGEST_SIZE >> 3;
 
@@ -265,6 +573,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be64++)
 			*hash_value_be64 = hash_temp64[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_384(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_384(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA512_HMAC:
 		count = SHA512_DIGEST_SIZE >> 3;
 
@@ -280,6 +598,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be64++)
 			*hash_value_be64 = hash_temp64[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_512(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_512(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	default:
 		CCP_LOG_ERR("Invalid session algo");
 		return -1;
@@ -525,6 +853,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+		if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA256:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -550,6 +902,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+		if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA384:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -575,6 +951,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+		if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA512:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -600,6 +1000,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+		if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -888,6 +1312,26 @@ ccp_auth_slot(struct ccp_session *session)
 		 * 6. Retrieve HMAC output from LSB to host memory
 		 */
 		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		count = 1;
+		/**< only one op; ctx and dst in host memory */
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		count = 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		count = 4;
+		/**
+		 * 1. Op to perform Ihash
+		 * 2.-3. Retrieve intermediate result from LSB
+		 *       (two passthru ops)
+		 * 4. Perform final hash
+		 */
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -1398,6 +1842,213 @@ ccp_perform_sha(struct rte_crypto_op *op,
 }
 
 static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+		      struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	struct ccp_passthru pst;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+						   *)session->auth.pre_compute);
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/*desc1 for SHA3-Ihash operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+	CCP_CMD_DST_HI(desc) = 0;
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Intermediate Hash value retrieve */
+	if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	/** SHA engine command descriptor for final hash */
+	ctx_paddr += CCP_SHA3_CTX_SIZE;
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+		dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+		CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+		dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+	} else {
+		CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+	}
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *ctx_addr, *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	ctx_addr = session->auth.sha3_ctx;
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for SHA3 operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
 ccp_perform_aes_cmac(struct rte_crypto_op *op,
 		     struct ccp_queue *cmd_q)
 {
@@ -1986,6 +2637,23 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 7;
 		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		result = ccp_perform_sha3(op, cmd_q);
+		b_info->desccnt += 1;
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index a350dfd..9d8d42e 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -50,7 +50,7 @@
 #define CMAC_PAD_VALUE 0x80
 #define CTR_NONCE_SIZE 4
 #define CTR_IV_SIZE 8
-#define CCP_SHA_CTX_SIZE 200
+#define CCP_SHA3_CTX_SIZE 200
 
 /**Macro helpers for CCP command creation*/
 #define	CCP_AES_SIZE(p)		((p)->aes.size)
@@ -79,15 +79,19 @@
 
 #define SHA224_DIGEST_SIZE      28
 #define SHA224_BLOCK_SIZE       64
+#define SHA3_224_BLOCK_SIZE     144
 
 #define SHA256_DIGEST_SIZE      32
 #define SHA256_BLOCK_SIZE       64
+#define SHA3_256_BLOCK_SIZE     136
 
 #define SHA384_DIGEST_SIZE      48
 #define SHA384_BLOCK_SIZE       128
+#define SHA3_384_BLOCK_SIZE	104
 
 #define SHA512_DIGEST_SIZE      64
 #define SHA512_BLOCK_SIZE       128
+#define SHA3_512_BLOCK_SIZE     72
 
 /**SHA LSB initialization values*/
 
@@ -203,6 +207,10 @@ enum ccp_sha_type {
 	CCP_SHA_TYPE_512,
 	CCP_SHA_TYPE_RSVD1,
 	CCP_SHA_TYPE_RSVD2,
+	CCP_SHA3_TYPE_224,
+	CCP_SHA3_TYPE_256,
+	CCP_SHA3_TYPE_384,
+	CCP_SHA3_TYPE_512,
 	CCP_SHA_TYPE__LAST,
 };
 
@@ -233,12 +241,20 @@ enum ccp_hash_algo {
 	CCP_AUTH_ALGO_SHA1_HMAC,
 	CCP_AUTH_ALGO_SHA224,
 	CCP_AUTH_ALGO_SHA224_HMAC,
+	CCP_AUTH_ALGO_SHA3_224,
+	CCP_AUTH_ALGO_SHA3_224_HMAC,
 	CCP_AUTH_ALGO_SHA256,
 	CCP_AUTH_ALGO_SHA256_HMAC,
+	CCP_AUTH_ALGO_SHA3_256,
+	CCP_AUTH_ALGO_SHA3_256_HMAC,
 	CCP_AUTH_ALGO_SHA384,
 	CCP_AUTH_ALGO_SHA384_HMAC,
+	CCP_AUTH_ALGO_SHA3_384,
+	CCP_AUTH_ALGO_SHA3_384_HMAC,
 	CCP_AUTH_ALGO_SHA512,
 	CCP_AUTH_ALGO_SHA512_HMAC,
+	CCP_AUTH_ALGO_SHA3_512,
+	CCP_AUTH_ALGO_SHA3_512_HMAC,
 	CCP_AUTH_ALGO_AES_CMAC,
 	CCP_AUTH_ALGO_AES_GCM,
 #ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
@@ -312,7 +328,9 @@ struct ccp_session {
 		/**<Buffer to store software generated precompute values*/
 		/**< For HMAC H(ipad ^ key) and H(opad ^ key) */
 		/**< For CMAC K1 IV and K2 IV*/
-		uint8_t pre_compute[2 * CCP_SHA_CTX_SIZE];
+		uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+		/**< SHA3 initial ctx, all zeros */
+		uint8_t sha3_ctx[200];
 		int aad_length;
 	} auth;
 	/**< Authentication Parameters */
@@ -372,4 +390,20 @@ int process_ops_to_dequeue(struct ccp_qp *qp,
 			   uint16_t nb_ops);
 
 
+/**
+ * APIs for SHA3 partial hash generation
+ * @param data_in buffer over which the partial hash is computed
+ * @param data_out buffer to which the partial hash result is written, in
+ *		   CCP big-endian format
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+			  uint8_t *data_out);
 #endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index e0160b3..9e94dcf 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -146,6 +146,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 1,
+					 .max = 144,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA256 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -188,6 +230,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-256-HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 1,
+					 .max = 136,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA384 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -230,6 +314,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-384-HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 1,
+					 .max = 104,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA512  */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -272,6 +398,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-512  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-512-HMAC  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 1,
+					 .max = 72,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
 	{	/*AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
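
For context, the HMAC pre-compute that feeds the partial_hash_sha3_*()
helpers above follows the standard ipad/opad key schedule. A hedged sketch
of that step for SHA3-256, with the block size and pad constants taken from
the patch and the function name hypothetical:

#include <stdint.h>
#include <string.h>

#define SHA3_256_BLOCK_SIZE 136
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c

/* Derive the two padded key blocks whose Keccak states the driver
 * snapshots via partial_hash_sha3_256(). Keys longer than one block
 * are rejected by ccp_configure_session_auth(), so zero-padding the
 * key into a block is sufficient here. */
static void
hmac_sha3_256_pads(const uint8_t *key, size_t keylen,
		   uint8_t ipad[SHA3_256_BLOCK_SIZE],
		   uint8_t opad[SHA3_256_BLOCK_SIZE])
{
	size_t i;

	memset(ipad, 0, SHA3_256_BLOCK_SIZE);
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, SHA3_256_BLOCK_SIZE);
	for (i = 0; i < SHA3_256_BLOCK_SIZE; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}
}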

* [PATCH 07/11] doc: add document for AMD CCP crypto PMD
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
                   ` (4 preceding siblings ...)
  2017-11-30 13:12 ` [PATCH 06/11] crypto/ccp: add support for SHA3 family authentication Ravi Kumar
@ 2017-11-30 13:12 ` Ravi Kumar
  2017-12-11 19:36   ` De Lara Guarch, Pablo
  2017-11-30 13:12 ` [PATCH 08/11] crypto/ccp: rename CCP crypto driver id Ravi Kumar
                   ` (5 subsequent siblings)
  11 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2017-11-30 13:12 UTC (permalink / raw)
  To: dev

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 doc/guides/cryptodevs/ccp.rst          | 124 +++++++++++++++++++++++++++++++++
 doc/guides/cryptodevs/features/ccp.ini |  57 +++++++++++++++
 doc/guides/cryptodevs/index.rst        |   1 +
 3 files changed, 182 insertions(+)
 create mode 100644 doc/guides/cryptodevs/ccp.rst
 create mode 100644 doc/guides/cryptodevs/features/ccp.ini

diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
new file mode 100644
index 0000000..b39af1d
--- /dev/null
+++ b/doc/guides/cryptodevs/ccp.rst
@@ -0,0 +1,124 @@
+.. Copyright(c) 2017 Advanced Micro Devices, Inc.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+   * Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+AMD CCP Poll Mode Driver
+========================
+
+This code provides the initial implementation of the CCP poll mode driver.
+The CCP poll mode driver library (librte_pmd_ccp) implements support for
+AMD’s cryptographic co-processor (CCP). The CCP PMD is a virtual crypto
+poll mode driver which schedules crypto operations to one or more available
+CCP hardware engines on the platform. The CCP PMD provides poll mode crypto
+driver support for the following hardware accelerator devices::
+
+	AMD Cryptographic Co-processor (0x1456)
+	AMD Cryptographic Co-processor (0x1468)
+
+Features
+--------
+
+CCP PMD has support for:
+
+Supported cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES_ECB``
+* ``RTE_CRYPTO_CIPHER_AES_CTR``
+* ``RTE_CRYPTO_CIPHER_3DES_CBC``
+
+Supported authentication algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1``
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA384``
+* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA512``
+* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+* ``RTE_CRYPTO_AUTH_AES_CMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_224``
+* ``RTE_CRYPTO_AUTH_SHA3_224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_256``
+* ``RTE_CRYPTO_AUTH_SHA3_256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_384``
+* ``RTE_CRYPTO_AUTH_SHA3_384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_512``
+* ``RTE_CRYPTO_AUTH_SHA3_512_HMAC``
+
+Supported AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
+Installation
+------------
+
+To compile the CCP PMD, it has to be enabled in the config/common_base file:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP=y``
+
+The CCP PMD also supports computing authentication on the CPU with the cipher
+offloaded to the CCP. To enable this feature, enable the following in the
+configuration:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y``
+
+This code was verified on Ubuntu 16.04.
+
+Initialization
+--------------
+
+Bind the CCP devices to the DPDK UIO driver module before running the CCP PMD.
+For example, for the 0x1456 device::
+
+	cd to the top-level DPDK directory
+	modprobe uio
+	insmod ./build/kmod/igb_uio.ko
+	echo "1022 1456" > /sys/bus/pci/drivers/igb_uio/new_id
+
+Another way to bind the CCP devices to the DPDK UIO driver is by using the
+``dpdk-devbind.py`` script. The following command assumes a ``BDF`` of
+``0000:09:00.2``::
+
+	cd to the top-level DPDK directory
+	./usertools/dpdk-devbind.py -b igb_uio 0000:09:00.2
+
+To verify real traffic, the l2fwd-crypto example can be used with the following command:
+
+.. code-block:: console
+
+	sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp" -- -p 0x1
+	--chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo AES_CBC
+	--cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+	--iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
+	--auth_op GENERATE --auth_algo SHA1_HMAC
+	--auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+	:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+	:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+
+Limitations
+-----------
+
+* Chained mbufs are not supported
+* MD5_HMAC is supported only if ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y`` is enabled in the configuration
diff --git a/doc/guides/cryptodevs/features/ccp.ini b/doc/guides/cryptodevs/features/ccp.ini
new file mode 100644
index 0000000..6184a67
--- /dev/null
+++ b/doc/guides/cryptodevs/features/ccp.ini
@@ -0,0 +1,57 @@
+;
+; Supported features of the 'ccp' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto       = Y
+Sym operation chaining = Y
+HW Accelerated         = Y
+
+;
+; Supported crypto algorithms of the 'ccp' crypto driver.
+;
+[Cipher]
+AES CBC (128)  = Y
+AES CBC (192)  = Y
+AES CBC (256)  = Y
+AES ECB (128)  = Y
+AES ECB (192)  = Y
+AES ECB (256)  = Y
+AES CTR (128)  = Y
+AES CTR (192)  = Y
+AES CTR (256)  = Y
+3DES CBC       = Y
+
+;
+; Supported authentication algorithms of the 'ccp' crypto driver.
+;
+[Auth]
+MD5 HMAC       = Y
+SHA1           = Y
+SHA1 HMAC      = Y
+SHA224         = Y
+SHA224 HMAC    = Y
+SHA256         = Y
+SHA256 HMAC    = Y
+SHA384         = Y
+SHA384 HMAC    = Y
+SHA512         = Y
+SHA512 HMAC    = Y
+AES CMAC       = Y
+SHA3_224       = Y
+SHA3_224 HMAC  = Y
+SHA3_256       = Y
+SHA3_256 HMAC  = Y
+SHA3_384       = Y
+SHA3_384 HMAC  = Y
+SHA3_512       = Y
+SHA3_512 HMAC  = Y
+
+;
+; Supported AEAD algorithms of the 'ccp' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index 6d4e15b..3202b48 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -39,6 +39,7 @@ Crypto Device Drivers
     aesni_mb
     aesni_gcm
     armv8
+    ccp
     dpaa2_sec
     dpaa_sec
     kasumi
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
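
Before running the l2fwd-crypto command from the guide, the binding can be
double-checked with the same script; the exact device string varies by
platform, so the output below is illustrative only:

	./usertools/dpdk-devbind.py --status

	Crypto devices using DPDK-compatible driver
	===========================================
	0000:09:00.2 'Device 1456' drv=igb_uio unused=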

* [PATCH 08/11] crypto/ccp: rename CCP crypto driver id
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
                   ` (5 preceding siblings ...)
  2017-11-30 13:12 ` [PATCH 07/11] doc: add document for AMD CCP crypto PMD Ravi Kumar
@ 2017-11-30 13:12 ` Ravi Kumar
  2017-12-11 14:30   ` De Lara Guarch, Pablo
  2017-11-30 13:12 ` [PATCH 09/11] crypto/ccp: update queue-pair release to enable reset Ravi Kumar
                   ` (4 subsequent siblings)
  11 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2017-11-30 13:12 UTC (permalink / raw)
  To: dev

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 28 ++++++++++++++--------------
 drivers/crypto/ccp/ccp_crypto.h  |  2 +-
 drivers/crypto/ccp/rte_ccp_pmd.c | 10 +++++-----
 3 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 1fc6569..254fed8 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -1593,7 +1593,7 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					 ccp_cryptodev_driver_id);
 	addr = session->auth.pre_compute;
 
 	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
@@ -1766,7 +1766,7 @@ ccp_perform_sha(struct rte_crypto_op *op,
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					ccp_cryptodev_driver_id);
 
 	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
@@ -1855,7 +1855,7 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					ccp_cryptodev_driver_id);
 
 	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
@@ -1995,7 +1995,7 @@ ccp_perform_sha3(struct rte_crypto_op *op,
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					ccp_cryptodev_driver_id);
 
 	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
@@ -2063,7 +2063,7 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					ccp_cryptodev_driver_id);
 	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
 
 	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
@@ -2215,7 +2215,7 @@ ccp_perform_aes(struct rte_crypto_op *op,
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					ccp_cryptodev_driver_id);
 	function.raw = 0;
 
 	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
@@ -2303,7 +2303,7 @@ ccp_perform_3des(struct rte_crypto_op *op,
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					ccp_cryptodev_driver_id);
 
 	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
 	switch (session->cipher.um.des_mode) {
@@ -2406,7 +2406,7 @@ ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					 ccp_cryptodev_driver_id);
 	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
 	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
 
@@ -2578,7 +2578,7 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					ccp_cryptodev_driver_id);
 
 	switch (session->cipher.algo) {
 	case CCP_CIPHER_ALGO_AES_CBC:
@@ -2615,7 +2615,7 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					ccp_cryptodev_driver_id);
 
 	switch (session->auth.algo) {
 	case CCP_AUTH_ALGO_SHA1:
@@ -2676,7 +2676,7 @@ ccp_crypto_combined(struct rte_crypto_op *op,
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					ccp_cryptodev_driver_id);
 
 	switch (session->auth.algo) {
 	case CCP_AUTH_ALGO_AES_GCM:
@@ -2734,7 +2734,7 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 	for (i = 0; i < nb_ops; i++) {
 		session = (struct ccp_session *)get_session_private_data(
 						 op[i]->sym->session,
-						 cryptodev_driver_id);
+						 ccp_cryptodev_driver_id);
 		switch (session->cmd_id) {
 		case CCP_CMD_CIPHER:
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -2812,7 +2812,7 @@ static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
 
 	session = (struct ccp_session *)get_session_private_data(
 					 op->sym->session,
-					 cryptodev_driver_id);
+					ccp_cryptodev_driver_id);
 
 	if (session->cmd_id == CCP_CMD_COMBINED) {
 		digest_data = op->sym->aead.digest.data;
@@ -2889,7 +2889,7 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 		op_d[i] = b_info->op[b_info->op_idx++];
 		session = (struct ccp_session *)get_session_private_data(
 						 op_d[i]->sym->session,
-						 cryptodev_driver_id);
+						ccp_cryptodev_driver_id);
 		switch (session->cmd_id) {
 		case CCP_CMD_CIPHER:
 			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 9d8d42e..ad3251b 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -340,7 +340,7 @@ struct ccp_session {
 	uint32_t reserved;
 } __rte_cache_aligned;
 
-extern uint8_t cryptodev_driver_id;
+extern uint8_t ccp_cryptodev_driver_id;
 
 struct ccp_qp;
 
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 9410470..d46c07c 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -44,7 +44,7 @@
  * Global static parameter used to find if CCP device is already initialized.
  */
 static unsigned int ccp_pmd_init_done;
-uint8_t cryptodev_driver_id;
+uint8_t ccp_cryptodev_driver_id;
 
 static struct ccp_session *
 get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
@@ -58,7 +58,7 @@ get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
 		sess = (struct ccp_session *)
 			get_session_private_data(
 				op->sym->session,
-				cryptodev_driver_id);
+				ccp_cryptodev_driver_id);
 	} else {
 		void *_sess;
 		void *_sess_private_data = NULL;
@@ -77,7 +77,7 @@ get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
 			sess = NULL;
 		}
 		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
-		set_session_private_data(op->sym->session, cryptodev_driver_id,
+		set_session_private_data(op->sym->session, ccp_cryptodev_driver_id,
 					 _sess_private_data);
 	}
 
@@ -203,7 +203,7 @@ cryptodev_ccp_create(const char *name,
 	}
 
 	printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
-	dev->driver_id = cryptodev_driver_id;
+	dev->driver_id = ccp_cryptodev_driver_id;
 
 	/* register rx/tx burst functions for data path */
 	dev->dev_ops = ccp_pmd_ops;
@@ -278,4 +278,4 @@ static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
 	"max_nb_queue_pairs=<int> max_nb_sessions=<int> socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_ccp_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_ccp_pmd_drv, ccp_cryptodev_driver_id);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH 09/11] crypto/ccp: update queue-pair release to enable reset
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
                   ` (6 preceding siblings ...)
  2017-11-30 13:12 ` [PATCH 08/11] crypto/ccp: rename CCP crypto driver id Ravi Kumar
@ 2017-11-30 13:12 ` Ravi Kumar
  2017-11-30 13:12 ` [PATCH 10/11] test/test: add test for AMD CCP crypto PMD Ravi Kumar
                   ` (3 subsequent siblings)
  11 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2017-11-30 13:12 UTC (permalink / raw)
  To: dev

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_pmd_ops.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 9e94dcf..daeb59c 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -647,8 +647,13 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
 static int
 ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
 {
+	struct ccp_qp *qp;
+
 	if (dev->data->queue_pairs[qp_id] != NULL) {
-		rte_free(dev->data->queue_pairs[qp_id]);
+		qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+		rte_ring_free(qp->processed_pkts);
+		rte_mempool_free(qp->batch_mp);
+		rte_free(qp);
 		dev->data->queue_pairs[qp_id] = NULL;
 	}
 	return 0;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
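
A side note on why the ring and mempool must be freed here rather than just
the qp struct: rte_ring_create() and rte_mempool_create() reserve named
memzones, so a later queue-pair setup that recreates them under the same
name would otherwise fail or leak. A hedged sketch of the pattern, with a
hypothetical helper name and size:

#include <rte_ring.h>

/* free-then-recreate: without the free, the ring's named memzone
 * stays reserved and the second create fails */
static struct rte_ring *
recreate_ring(struct rte_ring *old, const char *name, int socket_id)
{
	rte_ring_free(old);
	return rte_ring_create(name, 4096, socket_id,
			       RING_F_SP_ENQ | RING_F_SC_DEQ);
}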

* [PATCH 10/11] test/test: add test for AMD CCP crypto PMD
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
                   ` (7 preceding siblings ...)
  2017-11-30 13:12 ` [PATCH 09/11] crypto/ccp: update queue-pair release to enable reset Ravi Kumar
@ 2017-11-30 13:12 ` Ravi Kumar
  2017-12-11 14:27   ` De Lara Guarch, Pablo
  2017-11-30 13:12 ` [PATCH 11/11] crypto/ccp: update CCP PMD code-base Ravi Kumar
                   ` (2 subsequent siblings)
  11 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2017-11-30 13:12 UTC (permalink / raw)
  To: dev

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 test/test/test_cryptodev.c                   | 161 +++++++++++++++++++++++++++
 test/test/test_cryptodev.h                   |   1 +
 test/test/test_cryptodev_aes_test_vectors.h  |  93 ++++++++++------
 test/test/test_cryptodev_blockcipher.c       |   9 +-
 test/test/test_cryptodev_blockcipher.h       |   1 +
 test/test/test_cryptodev_des_test_vectors.h  |  42 ++++---
 test/test/test_cryptodev_hash_test_vectors.h |  60 ++++++----
 7 files changed, 301 insertions(+), 66 deletions(-)

diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 1bed65d..d99be69 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -364,6 +364,23 @@ testsuite_setup(void)
 		}
 	}
 
+	/* Create a CCP device if required */
+	if (gbl_driver_id == rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD))) {
+		nb_devs = rte_cryptodev_device_count_by_driver(
+				rte_cryptodev_driver_id_get(
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD)));
+		if (nb_devs < 1) {
+			ret = rte_vdev_init(
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD),
+				NULL);
+
+			TEST_ASSERT(ret == 0, "Failed to create "
+				"instance of pmd : %s",
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+		}
+	}
+
 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
 	if (gbl_driver_id == rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
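
For reference, the rte_vdev_init() call above is the runtime equivalent of starting the
application with the EAL option --vdev=crypto_ccp; the NULL second argument means no
device parameters are passed. A minimal sketch:

	/* same effect as --vdev=crypto_ccp on the command line */
	ret = rte_vdev_init(RTE_STR(CRYPTODEV_NAME_CCP_PMD), NULL);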
@@ -1753,6 +1770,44 @@ test_AES_cipheronly_openssl_all(void)
 }
 
 static int
+test_AES_chain_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AES_CHAIN_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_AES_cipheronly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_qat_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1924,6 +1979,25 @@ test_authonly_openssl_all(void)
 }
 
 static int
+test_authonly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AUTHONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_armv8_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -4999,6 +5073,44 @@ test_3DES_cipheronly_dpaa2_sec_all(void)
 }
 
 static int
+test_3DES_chain_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_3DES_CHAIN_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_3DES_cipheronly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_3DES_cipheronly_qat_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -9580,6 +9692,38 @@ static struct unit_test_suite cryptodev_mrvl_testsuite  = {
 	}
 };
 
+static struct unit_test_suite cryptodev_ccp_testsuite  = {
+	.suite_name = "Crypto Device CCP Unit Test Suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		TEST_CASE_ST(ut_setup, ut_teardown, test_multi_session),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_multi_session_random_usage),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_AES_chain_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_AES_cipheronly_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_3DES_chain_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_3DES_cipheronly_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_authonly_ccp_all),
+
+		/** Negative tests */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			authentication_verify_HMAC_SHA1_fail_data_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			authentication_verify_HMAC_SHA1_fail_tag_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			auth_decryption_AES128CBC_HMAC_SHA1_fail_data_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
 
 static int
 test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
@@ -9801,6 +9945,22 @@ test_cryptodev_dpaa_sec(void /*argv __rte_unused, int argc __rte_unused*/)
 	return unit_test_suite_runner(&cryptodev_dpaa_sec_testsuite);
 }
 
+static int
+test_cryptodev_ccp(void)
+{
+	gbl_driver_id = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+
+	if (gbl_driver_id == -1) {
+		RTE_LOG(ERR, USER1, "CCP PMD must be loaded. Check if "
+				"CONFIG_RTE_LIBRTE_PMD_CCP is enabled "
+				"in config file to run this testsuite.\n");
+		return TEST_FAILED;
+	}
+
+	return unit_test_suite_runner(&cryptodev_ccp_testsuite);
+}
+
 REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
 REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
 REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
@@ -9813,3 +9973,4 @@ REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8);
 REGISTER_TEST_COMMAND(cryptodev_sw_mrvl_autotest, test_cryptodev_mrvl);
 REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec);
 REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec);
+REGISTER_TEST_COMMAND(cryptodev_ccp_autotest, test_cryptodev_ccp);
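
With this registration in place, the suite is run like the other cryptodev autotests
(assumed usage, mirroring the existing commands):

	/* From the dpdk-test prompt, with CONFIG_RTE_LIBRTE_PMD_CCP=y:
	 *
	 *   RTE>>cryptodev_ccp_autotest
	 *
	 * REGISTER_TEST_COMMAND() binds this command name to
	 * test_cryptodev_ccp() at startup. */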
diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
index 26bfbe6..9fa68f9 100644
--- a/test/test/test_cryptodev.h
+++ b/test/test/test_cryptodev.h
@@ -89,6 +89,7 @@
 #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
 #define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
 #define CRYPTODEV_NAME_MRVL_PMD		crypto_mrvl
+#define CRYPTODEV_NAME_CCP_PMD		crypto_ccp
 
 /**
  * Write (spread) data from buffer to mbuf data
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 9c13041..59ea211 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1199,7 +1199,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1212,7 +1213,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1251,7 +1253,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1264,7 +1267,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1277,7 +1281,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1311,7 +1316,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1332,7 +1338,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
@@ -1354,7 +1361,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1374,7 +1382,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1383,7 +1392,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1407,7 +1417,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1470,7 +1481,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1482,7 +1494,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1494,7 +1507,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1507,7 +1521,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1516,7 +1531,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr =
@@ -1526,7 +1542,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
@@ -1541,7 +1558,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC Decryption",
@@ -1553,7 +1571,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption",
@@ -1564,7 +1583,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption Scater gather",
@@ -1583,7 +1603,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC Encryption",
@@ -1595,7 +1616,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC Decryption",
@@ -1607,7 +1629,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Encryption",
@@ -1617,7 +1640,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Decryption",
@@ -1627,7 +1651,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Encryption",
@@ -1639,7 +1664,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Decryption",
@@ -1651,7 +1677,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR Encryption",
@@ -1662,7 +1689,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR Decryption",
@@ -1673,7 +1701,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR Encryption",
@@ -1685,7 +1714,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR Decryption",
@@ -1697,7 +1727,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Encryption (12-byte IV)",
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index 20f3296..0091a81 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -82,6 +82,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 
 	int openssl_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+	int ccp_pmd = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
 	int scheduler_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
 	int armv8_pmd = rte_cryptodev_driver_id_get(
@@ -122,7 +124,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 			driver_id == qat_pmd ||
 			driver_id == openssl_pmd ||
 			driver_id == armv8_pmd ||
-			driver_id == mrvl_pmd) { /* Fall through */
+			driver_id == mrvl_pmd) ||
+			driver_id == ccp_pmd) { /* Fall through */
 		digest_len = tdata->digest.len;
 	} else if (driver_id == aesni_mb_pmd ||
 			driver_id == scheduler_pmd) {
@@ -583,6 +586,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
 
 	int openssl_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+	int ccp_pmd = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
 	int dpaa2_sec_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
 	int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
@@ -655,6 +660,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER;
 	else if (driver_id == dpaa2_sec_pmd)
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC;
+	else if (driver_id == ccp_pmd)
+		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_CCP;
 	else if (driver_id == dpaa_sec_pmd)
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC;
 	else if (driver_id == mrvl_pmd)
diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
index 67c78d4..b6824f6 100644
--- a/test/test/test_cryptodev_blockcipher.h
+++ b/test/test/test_cryptodev_blockcipher.h
@@ -55,6 +55,7 @@
 #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC	0x0020 /* DPAA2_SEC flag */
 #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC	0x0040 /* DPAA_SEC flag */
 #define BLOCKCIPHER_TEST_TARGET_PMD_MRVL	0x0080 /* Marvell flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_CCP		0x0100 /* CCP flag */
 
 #define BLOCKCIPHER_TEST_OP_CIPHER	(BLOCKCIPHER_TEST_OP_ENCRYPT | \
 					BLOCKCIPHER_TEST_OP_DECRYPT)
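
Each target-PMD flag must be a distinct bit: test cases OR these flags into pmd_mask and
the runner selects cases with a bitwise AND, so a duplicated value would silently run one
PMD's cases on another (DPAA_SEC already owns 0x0040, hence CCP takes the next free bit).
A hedged spot-check, usable inside any function since RTE_BUILD_BUG_ON does not work at
file scope:

	/* hypothetical guard, not part of the patch */
	RTE_BUILD_BUG_ON(BLOCKCIPHER_TEST_TARGET_PMD_CCP &
			 (BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
			  BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC));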
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index d09e23d..1a794c4 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -1072,7 +1072,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1081,19 +1082,22 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
 		.test_data = &triple_des128cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Decryption Digest Verify",
 		.test_data = &triple_des128cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Encryption Digest",
@@ -1103,7 +1107,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1113,21 +1118,24 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
 		.test_data = &triple_des192cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Decryption Digest Verify",
 		.test_data = &triple_des192cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CTR HMAC-SHA1 Encryption Digest",
@@ -1208,7 +1216,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr =
@@ -1217,7 +1226,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
@@ -1229,7 +1239,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC Decryption",
@@ -1238,7 +1249,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC Encryption",
@@ -1248,7 +1260,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC Decryption",
@@ -1258,7 +1271,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 3b840ce..e0a8f2e 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -386,13 +386,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.test_descr = "SHA1 Digest",
 		.test_data = &sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA1 Digest Verify",
 		.test_data = &sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest",
@@ -403,7 +405,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Verify",
@@ -414,19 +417,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA224 Digest",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA224 Digest Verify",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest",
@@ -437,7 +443,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest Verify",
@@ -448,19 +455,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA256 Digest",
 		.test_data = &sha256_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA256 Digest Verify",
 		.test_data = &sha256_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest",
@@ -471,7 +481,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest Verify",
@@ -482,19 +493,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA384 Digest",
 		.test_data = &sha384_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA384 Digest Verify",
 		.test_data = &sha384_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest",
@@ -505,7 +519,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest Verify",
@@ -516,19 +531,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA512 Digest",
 		.test_data = &sha512_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA512 Digest Verify",
 		.test_data = &sha512_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest",
@@ -539,7 +557,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest Verify",
@@ -550,7 +569,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH 11/11] crypto/ccp: update CCP PMD code-base
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
                   ` (8 preceding siblings ...)
  2017-11-30 13:12 ` [PATCH 10/11] test/test: add test for AMD CCP crypto PMD Ravi Kumar
@ 2017-11-30 13:12 ` Ravi Kumar
  2017-12-11 19:30   ` De Lara Guarch, Pablo
  2017-12-11 13:39 ` [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD De Lara Guarch, Pablo
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
  11 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2017-11-30 13:12 UTC (permalink / raw)
  To: dev

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_dev.h     |  1 +
 drivers/crypto/ccp/ccp_pci.h     |  2 +-
 drivers/crypto/ccp/rte_ccp_pmd.c | 31 ++++++++++++++++++-------------
 3 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index 28eb68e..832407a 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -36,6 +36,7 @@
 #include <stdint.h>
 #include <string.h>
 
+#include <rte_bus_pci.h>
 #include <rte_atomic.h>
 #include <rte_byteorder.h>
 #include <rte_io.h>
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
index a0edf75..292234d 100644
--- a/drivers/crypto/ccp/ccp_pci.h
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -33,7 +33,7 @@
 
 #include <stdint.h>
 
-#include <rte_pci.h>
+#include <rte_bus_pci.h>
 
 #define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
 #define PROC_MODULES "/proc/modules"
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index d46c07c..f1369be 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -28,12 +28,14 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
 #include <rte_common.h>
 #include <rte_config.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
 #include <rte_malloc.h>
 
 #include <ccp_crypto.h>
@@ -176,7 +178,7 @@ cryptodev_ccp_remove(struct rte_vdev_device *dev)
 static int
 cryptodev_ccp_create(const char *name,
 		     struct rte_vdev_device *vdev,
-		     struct rte_crypto_vdev_init_params *init_params)
+		     struct rte_cryptodev_pmd_init_params *init_params)
 {
 	struct rte_cryptodev *dev;
 	struct ccp_private *internals;
@@ -186,10 +188,9 @@ cryptodev_ccp_create(const char *name,
 		snprintf(init_params->name, sizeof(init_params->name),
 				"%s", name);
 
-	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
-			sizeof(struct ccp_private),
-			init_params->socket_id,
-			vdev);
+	dev = rte_cryptodev_pmd_create(init_params->name,
+				       &vdev->device,
+				       init_params);
 	if (dev == NULL) {
 		CCP_LOG_ERR("failed to create cryptodev vdev");
 		goto init_error;
@@ -236,11 +237,12 @@ cryptodev_ccp_probe(struct rte_vdev_device *vdev)
 {
 	int rc = 0;
 	const char *name;
-	struct rte_crypto_vdev_init_params init_params = {
-		CCP_PMD_MAX_QUEUE_PAIRS,
-		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+	struct rte_cryptodev_pmd_init_params init_params = {
+		"",
+		sizeof(struct ccp_private),
 		rte_socket_id(),
-		{0}
+		CCP_PMD_MAX_QUEUE_PAIRS,
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
 	};
 	const char *input_args;
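
The switch to rte_cryptodev_pmd_create() folds allocating the rte_cryptodev and its
private data into one call driven by init_params. For readability, the positional
initializer above maps to the following designated form (field names per the 18.02-era
struct rte_cryptodev_pmd_init_params; verify against rte_cryptodev_pmd.h):

	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.private_data_size = sizeof(struct ccp_private),
		.socket_id = rte_socket_id(),
		.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS,
		.max_nb_sessions = RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
	};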
 
@@ -253,7 +255,7 @@ cryptodev_ccp_probe(struct rte_vdev_device *vdev)
 		return -EINVAL;
 
 	input_args = rte_vdev_device_args(vdev);
-	rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
+	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
 	init_params.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
 
 	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
@@ -275,7 +277,10 @@ static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
 	.remove = cryptodev_ccp_remove
 };
 
+static struct cryptodev_driver ccp_crypto_drv;
+
 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
 	"max_nb_queue_pairs=<int> max_nb_sessions=<int> socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_ccp_pmd_drv, ccp_cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv,
+			       ccp_cryptodev_driver_id);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* Re: [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
                   ` (9 preceding siblings ...)
  2017-11-30 13:12 ` [PATCH 11/11] crypto/ccp: update CCP PMD code-base Ravi Kumar
@ 2017-12-11 13:39 ` De Lara Guarch, Pablo
  2017-12-11 13:41   ` Kumar, Ravi1
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
  11 siblings, 1 reply; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2017-12-11 13:39 UTC (permalink / raw)
  To: Ravi Kumar, dev

Hi Ravi,

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ravi Kumar
> Sent: Thursday, November 30, 2017 1:12 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH 01/11] cryptodev: add compile support for
> AMD CCP crypto PMD
> 

Thanks for splitting the original patch into multiple ones.
However, in this case, this should get merged into the other patches,
as all the changes reference files/folders that do not exist yet.

Apart from this, two comments below.

Thanks,
Pablo

> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
> ---
>  MAINTAINERS             | 6 ++++++
>  config/common_base      | 5 +++++
>  drivers/crypto/Makefile | 1 +
>  mk/rte.app.mk           | 2 ++
>  4 files changed, 14 insertions(+)
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index f0baeb4..daac82e 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -588,6 +588,12 @@ M: Pablo de Lara
> <pablo.de.lara.guarch@intel.com>
>  T: git://dpdk.org/next/dpdk-next-crypto
>  F: doc/guides/cryptodevs/features/default.ini
> 
> +AMD CCP Crypto PMD

Remove trailing whitespace here.

> +M: Ravi Kumar <ravi1.kumar@amd.com>
> +F: drivers/crypto/ccp/
> +F: doc/guides/cryptodevs/ccp.rst
> +F: doc/guides/cryptodevs/features/ccp.ini

Add these lines in the same patch where you add these files.

> +

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD
  2017-12-11 13:39 ` [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD De Lara Guarch, Pablo
@ 2017-12-11 13:41   ` Kumar, Ravi1
  0 siblings, 0 replies; 131+ messages in thread
From: Kumar, Ravi1 @ 2017-12-11 13:41 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, dev

>Hi Ravi,
>
>> -----Original Message-----
>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ravi Kumar
>> Sent: Thursday, November 30, 2017 1:12 PM
>> To: dev@dpdk.org
>> Subject: [dpdk-dev] [PATCH 01/11] cryptodev: add compile support for 
>> AMD CCP crypto PMD
>> 
>
>Thanks for splitting the original patch into multiple ones.
>However, in this case, this should get merged to the other patches, as all the changes are referencing files/folders that do not exist yet.
>
>Apart from this, two comments below.
>
>Thanks,
>Pablo
>
>> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
>> ---
>>  MAINTAINERS             | 6 ++++++
>>  config/common_base      | 5 +++++
>>  drivers/crypto/Makefile | 1 +
>>  mk/rte.app.mk           | 2 ++
>>  4 files changed, 14 insertions(+)
>> 
>> diff --git a/MAINTAINERS b/MAINTAINERS index f0baeb4..daac82e 100644
>> --- a/MAINTAINERS
>> +++ b/MAINTAINERS
>> @@ -588,6 +588,12 @@ M: Pablo de Lara
>> <pablo.de.lara.guarch@intel.com>
>>  T: git://dpdk.org/next/dpdk-next-crypto
>>  F: doc/guides/cryptodevs/features/default.ini
>> 
>> +AMD CCP Crypto PMD
>
>Remove trailing whitespace here.
>
>> +M: Ravi Kumar <ravi1.kumar@amd.com>
>> +F: drivers/crypto/ccp/
>> +F: doc/guides/cryptodevs/ccp.rst
>> +F: doc/guides/cryptodevs/features/ccp.ini
>
>Add these lines, as you add the files.
>
>> +
>
>
Hi Pablo,

Thanks for the review comments. Will work on it.

Regards,
Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH 10/11] test/test: add test for AMD CCP crypto PMD
  2017-11-30 13:12 ` [PATCH 10/11] test/test: add test for AMD CCP crypto PMD Ravi Kumar
@ 2017-12-11 14:27   ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2017-12-11 14:27 UTC (permalink / raw)
  To: Ravi Kumar, dev



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ravi Kumar
> Sent: Thursday, November 30, 2017 1:13 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH 10/11] test/test: add test for AMD CCP crypto
> PMD
> 
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>

Hi Ravi,

There is a compilation issue here:

test/test/test_cryptodev_blockcipher.c:127:27: error:
expected expression before '||' token
    driver_id == mrvl_pmd) ||
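
The fix is to drop the stray closing parenthesis so the chain stays a single expression;
a sketch of the corrected condition:

	if (driver_id == qat_pmd ||
			driver_id == openssl_pmd ||
			driver_id == armv8_pmd ||
			driver_id == mrvl_pmd ||
			driver_id == ccp_pmd) { /* Fall through */
		digest_len = tdata->digest.len;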

Also, for crypto tests like this, you should use the "test/crypto: " commit title prefix.

Thanks,
Pablo

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH 08/11] crypto/ccp: rename CCP crypto driver id
  2017-11-30 13:12 ` [PATCH 08/11] crypto/ccp: rename CCP crypto driver id Ravi Kumar
@ 2017-12-11 14:30   ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2017-12-11 14:30 UTC (permalink / raw)
  To: Ravi Kumar, dev



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ravi Kumar
> Sent: Thursday, November 30, 2017 1:13 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH 08/11] crypto/ccp: rename CCP crypto driver id
> 
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>

Hi Ravi,

I think this patch is not necessary; you can just use this new driver id
directly from the beginning of the patchset, right? (Although I don't see why the rename is needed at all.)

Thanks,
Pablo

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH 11/11] crypto/ccp: update CCP PMD code-base
  2017-11-30 13:12 ` [PATCH 11/11] crypto/ccp: update CCP PMD code-base Ravi Kumar
@ 2017-12-11 19:30   ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2017-12-11 19:30 UTC (permalink / raw)
  To: Ravi Kumar, dev



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ravi Kumar
> Sent: Thursday, November 30, 2017 1:13 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH 11/11] crypto/ccp: update CCP PMD code-base
> 
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>

It looks like this is an incremental patch for the latest changes made in cryptodev.
This patch should be integrated mostly (if not all) into patch 2; otherwise, that patch won't compile.
The tree must build after every single patch in the set, so that git bisect is not broken:
for instance, patch 2 must compile with only patch 1 applied.

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH 07/11] doc: add document for AMD CCP crypto PMD
  2017-11-30 13:12 ` [PATCH 07/11] doc: add document for AMD CCP crypto PMD Ravi Kumar
@ 2017-12-11 19:36   ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2017-12-11 19:36 UTC (permalink / raw)
  To: Ravi Kumar, dev

Hi Ravi,

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ravi Kumar
> Sent: Thursday, November 30, 2017 1:12 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH 07/11] doc: add document for AMD CCP crypto
> PMD
> 
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
> ---
>  doc/guides/cryptodevs/ccp.rst          | 124
> +++++++++++++++++++++++++++++++++
>  doc/guides/cryptodevs/features/ccp.ini |  57 +++++++++++++++
>  doc/guides/cryptodevs/index.rst        |   1 +
>  3 files changed, 182 insertions(+)
>  create mode 100644 doc/guides/cryptodevs/ccp.rst  create mode 100644
> doc/guides/cryptodevs/features/ccp.ini
> 
> diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
> new file mode 100644 index 0000000..b39af1d
> --- /dev/null
> +++ b/doc/guides/cryptodevs/ccp.rst
> @@ -0,0 +1,124 @@
> +.. Copyright(c) 2017 Advanced Micro Devices, Inc.
> +   All rights reserved.
> +
> +   Redistribution and use in source and binary forms, with or without
> +   modification, are permitted provided that the following conditions
> +   are met:
> +
> +   * Redistributions of source code must retain the above copyright
> +   notice, this list of conditions and the following disclaimer.
> +   * Redistributions in binary form must reproduce the above copyright
> +   notice, this list of conditions and the following disclaimer in the
> +   documentation and/or other materials provided with the distribution.
> +   * Neither the name of the copyright holder nor the names of its
> +   contributors may be used to endorse or promote products derived from
> +   this software without specific prior written permission.
> +
> +   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> +   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> +   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> +   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> +   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> +   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
> NOT
> +   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
> OF USE,
> +   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
> AND ON ANY
> +   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> +   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> THE USE
> +   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.
> +
> +AMD CCP Poll Mode Driver
> +========================
> +
> +This code provides the initial implementation of the ccp poll mode driver.
> +The CCP poll mode driver library (librte_pmd_ccp) implements support
> +for AMD’s cryptographic co-processor (CCP). The CCP PMD is a virtual
> +crypto poll mode driver which schedules crypto operations to one or
> +more available CCP hardware engines on the platform. The CCP PMD
> +provides poll mode crypto driver support for the following hardware
> accelerator devices::
> +
> +	AMD Cryptographic Co-processor (0x1456)
> +	AMD Cryptographic Co-processor (0x1468)
> +
> +Features
> +--------
> +
> +CCP PMD has support for:
> +
> +Supported cipher algorithms:

A blank line is required before starting a list, i.e. between
"CCP PMD has support for:" and the first "* ..." item
(same for the other two lists).

> +* ``RTE_CRYPTO_CIPHER_AES_CBC``
> +* ``RTE_CRYPTO_CIPHER_AES_ECB``
> +* ``RTE_CRYPTO_CIPHER_AES_CTR``
> +* ``RTE_CRYPTO_CIPHER_3DES_CBC``
> +


^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH 05/11] crypto/ccp: add support for CPU based authentication
  2017-11-30 13:12 ` [PATCH 05/11] crypto/ccp: add support for CPU based authentication Ravi Kumar
@ 2017-12-11 19:40   ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2017-12-11 19:40 UTC (permalink / raw)
  To: Ravi Kumar, dev



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ravi Kumar
> Sent: Thursday, November 30, 2017 1:12 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH 05/11] crypto/ccp: add support for CPU based
> authentication

When enabling this, I got the following error:

In file included from build/include/rte_mempool.h:79:0,
                 from build/include/rte_mbuf.h:65,
                 from build/include/rte_cryptodev_pmd.h:51,
                 from drivers/crypto/ccp/ccp_crypto.c:46:
drivers/crypto/ccp/ccp_crypto.c: In function 'cpu_crypto_auth':
build/include/rte_memcpy.h:367:2: error: array subscript is above array bounds [-Werror=array-bounds]
  rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);

Also, could you add a commit message in this and the other patches?
If the patch is simple enough, a commit message might not be necessary,
but for patches like this one, I think it is quite useful.

Also, I have an extra comment below.

Thanks,
Pablo

> 
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>

...

> a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c index
> 4d71ec1..1833929 100644
> --- a/drivers/crypto/ccp/ccp_crypto.c
> +++ b/drivers/crypto/ccp/ccp_crypto.c

...
> +static int cpu_crypto_auth(struct rte_crypto_op *op, struct ccp_session
> *sess,
> +			   EVP_MD_CTX *ctx)
> +{
> +	uint8_t *src, *dst;
> +	int srclen, status;
> +	struct rte_mbuf *mbuf_src, *mbuf_dst;
> +	const EVP_MD *algo = NULL;
> +	EVP_PKEY *pkey;
> +
> +	algo_select(sess->auth.algo, &algo);
> +	pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess-
> >auth.key,
> +				    sess->auth.key_length);
> +	mbuf_src = op->sym->m_src;
> +	mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym-
> >m_src;
> +	srclen = op->sym->auth.data.length;
> +	src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
> +				      op->sym->auth.data.offset);
> +
> +	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
> +		dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
> +						    sess->auth.digest_length);

There was a change in the previous release that removed appending to the source mbuf
to allocate memory for a temporary digest (like in this case). Instead, memory is reserved
in the queue pair structure. This way, you won't have to worry about not having enough space in the mbuf.
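
A sketch of that approach (the field name and bound are assumptions; see how the other
PMDs were converted in that release):

	#define CCP_DIGEST_LENGTH_MAX	64	/* assumed bound: SHA-512 */

	struct ccp_qp {
		/* ... existing fields ... */
		uint8_t temp_digest[CCP_DIGEST_LENGTH_MAX];
	};

	/* in cpu_crypto_auth(), for the verify case: */
	if (sess->auth.op == CCP_AUTH_OP_VERIFY)
		dst = qp->temp_digest;	/* no rte_pktmbuf_append() needed */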

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH 02/11] crypto/ccp: add support for AMD CCP crypto PMD
  2017-11-30 13:12 ` [PATCH 02/11] crypto/ccp: add " Ravi Kumar
@ 2017-12-11 19:50   ` De Lara Guarch, Pablo
  2017-12-12 11:46     ` Kumar, Ravi1
  0 siblings, 1 reply; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2017-12-11 19:50 UTC (permalink / raw)
  To: Ravi Kumar, dev

Hi Ravi,

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ravi Kumar
> Sent: Thursday, November 30, 2017 1:12 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH 02/11] crypto/ccp: add support for AMD CCP
> crypto PMD
> 

As I said in another patch, you should add a commit message for complex patches like this one.
Another thing that would improve the readability of the patchset, and therefore
make a review easier, is to split a long patch into smaller patches.

From what I have seen, you have some patches where you are adding support for the CMAC and SHA3
algorithms. I suggest doing the same for the other algorithms that this PMD supports.
So, the first patch would have the skeleton of the PMD, and then you can have a separate patch per algorithm.
Well, not necessarily a patch per algorithm, but per subset of algorithms:
I would propose one for AES-CBC/ECB/CTR, one for 3DES-CBC, one for AES-GCM,
and another one for the SHA-x algorithms.

Thanks,
Pablo

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH 02/11] crypto/ccp: add support for AMD CCP crypto PMD
  2017-12-11 19:50   ` De Lara Guarch, Pablo
@ 2017-12-12 11:46     ` Kumar, Ravi1
  0 siblings, 0 replies; 131+ messages in thread
From: Kumar, Ravi1 @ 2017-12-12 11:46 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, dev

>Hi Ravi,
>
>> -----Original Message-----
>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ravi Kumar
>> Sent: Thursday, November 30, 2017 1:12 PM
>> To: dev@dpdk.org
>> Subject: [dpdk-dev] [PATCH 02/11] crypto/ccp: add support for AMD CCP 
>> crypto PMD
>> 
>
>As I said in the another patch, you should add a commit message for complex patches like this one.
>Another thing that would be useful for improving the readability of the patchset and therefore, make a review easier, is to split a long patch in smaller patches.
>
>From what I have seen, you have some patches where you are adding support for CMAC and SHA3 algorithms. I suggest to do the same for the other algorithms that this PMD supports.
>So, the first patch would have the skeleton of the PMD, and then you can have a separate patch per algorithm.
>Well, not necessarily, a patch per algorithm, but per subset of algorithms.
>I would propose one for AES-CBC/ECB/CTR, one for 3DES-CBC, one for AES-GCM and another one for SHA-x algorithms.
>
>Thanks,
>Pablo
>
Hi Pablo,

Thanks for all the comments. We are working on them.

Regards,
Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support
  2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
                   ` (10 preceding siblings ...)
  2017-12-11 13:39 ` [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD De Lara Guarch, Pablo
@ 2018-01-05  9:39 ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 02/20] crypto/ccp: add ccp device initialization and remove Ravi Kumar
                     ` (21 more replies)
  11 siblings, 22 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 config/common_base                         |  5 +++
 drivers/crypto/Makefile                    |  1 +
 drivers/crypto/ccp/Makefile                | 55 ++++++++++++++++++++++++++
 drivers/crypto/ccp/rte_ccp_pmd.c           | 62 ++++++++++++++++++++++++++++++
 drivers/crypto/ccp/rte_pmd_ccp_version.map |  4 ++
 mk/rte.app.mk                              |  2 +
 6 files changed, 129 insertions(+)
 create mode 100644 drivers/crypto/ccp/Makefile
 create mode 100644 drivers/crypto/ccp/rte_ccp_pmd.c
 create mode 100644 drivers/crypto/ccp/rte_pmd_ccp_version.map

diff --git a/config/common_base b/config/common_base
index e74febe..88826c8 100644
--- a/config/common_base
+++ b/config/common_base
@@ -557,6 +557,11 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
 CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 
 #
+# Compile PMD for AMD CCP crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_CCP=n
+
+#
 # Compile PMD for Marvell Crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 645b696..b378643 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -44,5 +44,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 0000000..51c5e5b
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,55 @@
+#
+#   Copyright(c) 2018 Advanced Micro Devices, Inc.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+# 	* Redistributions of source code must retain the above copyright
+# 	notice, this list of conditions and the following disclaimer.
+# 	* Redistributions in binary form must reproduce the above copyright
+# 	notice, this list of conditions and the following disclaimer in the
+# 	documentation and/or other materials provided with the distribution.
+# 	* Neither the name of the copyright holder nor the names of its
+# 	contributors may be used to endorse or promote products derived from
+# 	this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library include paths
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 0000000..6fa14bd
--- /dev/null
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,62 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+
+uint8_t ccp_cryptodev_driver_id;
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev __rte_unused)
+{
+	return 0;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev __rte_unused)
+{
+	return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+	.probe = cryptodev_ccp_probe,
+	.remove = cryptodev_ccp_remove
+};
+
+static struct cryptodev_driver ccp_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+	"max_nb_queue_pairs=<int> max_nb_sessions=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv,
+			       ccp_cryptodev_driver_id);
diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 0000000..179140f
--- /dev/null
+++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+	local: *;
+};
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 6a6a745..0453b7f 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -191,6 +191,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC)   += -lrte_bus_dpaa
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC)   += -lrte_pmd_dpaa_sec
 endif # CONFIG_RTE_LIBRTE_DPAA_BUS
 
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CCP)   += -lrte_pmd_ccp -lcrypto
+
 endif # CONFIG_RTE_LIBRTE_CRYPTODEV
 
 ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v2 02/20] crypto/ccp: add ccp device initialization and remove
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 03/20] crypto/ccp: add basic pmd ops support for start, stop, config etc Ravi Kumar
                     ` (20 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

The CCP PMD is a virtual crypto PMD that schedules work across
all of the actual hardware CCP engines available on the platform.
The PMD builds a linked list of all detected CCP engines, which
are assigned in a round-robin fashion to the CPU cores requesting
crypto operations.
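
As a rough illustration of this scheduling model (an editorial sketch, not
part of the patch; it assumes the ccp_list, ccp_device and ccp_queue
definitions introduced in ccp_dev.h below, and that cmd_q_count is non-zero
for every probed device):

	/* Pick the next command queue, rotating first across devices and
	 * then across each device's hardware queues. Illustrative only. */
	static struct ccp_queue *
	ccp_select_queue_sketch(void)
	{
		static struct ccp_device *last;
		struct ccp_queue *cmd_q;

		last = (last == NULL) ? TAILQ_FIRST(&ccp_list)
				      : TAILQ_NEXT(last, next);
		if (last == NULL)
			last = TAILQ_FIRST(&ccp_list);	/* wrap around */
		if (last == NULL)
			return NULL;	/* no CCP engines probed */

		cmd_q = &last->cmd_q[last->qidx];
		last->qidx = (last->qidx + 1) % last->cmd_q_count;
		return cmd_q;
	}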
---
 drivers/crypto/ccp/Makefile          |   3 +
 drivers/crypto/ccp/ccp_dev.c         | 787 +++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h         | 310 ++++++++++++++
 drivers/crypto/ccp/ccp_pci.c         | 262 ++++++++++++
 drivers/crypto/ccp/ccp_pci.h         |  53 +++
 drivers/crypto/ccp/ccp_pmd_ops.c     |  55 +++
 drivers/crypto/ccp/ccp_pmd_private.h |  82 ++++
 drivers/crypto/ccp/rte_ccp_pmd.c     | 151 ++++++-
 8 files changed, 1701 insertions(+), 2 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp_dev.c
 create mode 100644 drivers/crypto/ccp/ccp_dev.h
 create mode 100644 drivers/crypto/ccp/ccp_pci.c
 create mode 100644 drivers/crypto/ccp/ccp_pci.h
 create mode 100644 drivers/crypto/ccp/ccp_pmd_ops.c
 create mode 100644 drivers/crypto/ccp/ccp_pmd_private.h

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 51c5e5b..5e58c31 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -51,5 +51,8 @@ EXPORT_MAP := rte_pmd_ccp_version.map
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 0000000..5af2b49
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,787 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+			   uint32_t queue_size,
+			   int socket_id)
+{
+	const struct rte_memzone *mz;
+	unsigned int memzone_flags = 0;
+	const struct rte_memseg *ms;
+
+	mz = rte_memzone_lookup(queue_name);
+	if (mz != 0)
+		return mz;
+
+	ms = rte_eal_get_physmem_layout();
+	switch (ms[0].hugepage_sz) {
+	case(RTE_PGSIZE_2M):
+		memzone_flags = RTE_MEMZONE_2MB;
+		break;
+	case(RTE_PGSIZE_1G):
+		memzone_flags = RTE_MEMZONE_1GB;
+		break;
+	case(RTE_PGSIZE_16M):
+		memzone_flags = RTE_MEMZONE_16MB;
+		break;
+	case(RTE_PGSIZE_16G):
+		memzone_flags = RTE_MEMZONE_16GB;
+		break;
+	default:
+		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
+	}
+
+	return rte_memzone_reserve_aligned(queue_name,
+					   queue_size,
+					   socket_id,
+					   memzone_flags,
+					   queue_size);
+}
+
+/* bitmap support apis */
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+	return ((bitmap[WORD_OFFSET(n)] & (1 << BIT_OFFSET(n))) != 0);
+}
+
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+	unsigned long first_zero;
+
+	first_zero = __builtin_ffsl(~word);
+	return first_zero ? (first_zero - 1) :
+		BITS_PER_WORD;
+}
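+
+/* Editorial example (not part of the patch): ccp_ffz(0x7) computes
+ * __builtin_ffsl(~0x7) = 4 and returns bit index 3, the first zero bit;
+ * ccp_ffz(~0UL) returns BITS_PER_WORD, meaning no zero bit exists.
+ */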
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+	uint32_t i;
+	uint32_t nwords = 0;
+
+	nwords = (limit - 1) / BITS_PER_WORD + 1;
+	for (i = 0; i < nwords; i++) {
+		if (addr[i] == 0UL)
+			return i * BITS_PER_WORD;
+		if (addr[i] < ~(0UL))
+			break;
+	}
+	return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_WORD;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_clear >= 0) {
+		*p &= ~mask_to_clear;
+		len -= bits_to_clear;
+		bits_to_clear = BITS_PER_WORD;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p &= ~mask_to_clear;
+	}
+}
+
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+		   unsigned long nbits,
+		   unsigned long start,
+		   unsigned long invert)
+{
+	unsigned long tmp;
+
+	if (!nbits || start >= nbits)
+		return nbits;
+
+	tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+	/* Handle 1st word. */
+	tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+	start = ccp_round_down(start, BITS_PER_WORD);
+
+	while (!tmp) {
+		start += BITS_PER_WORD;
+		if (start >= nbits)
+			return nbits;
+
+		tmp = addr[start / BITS_PER_WORD] ^ invert;
+	}
+
+	return RTE_MIN(start + (ffs(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+		  unsigned long size,
+		  unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+		       unsigned long size,
+		       unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+			       unsigned long size,
+			       unsigned long start,
+			       unsigned int nr)
+{
+	unsigned long index, end, i;
+
+again:
+	index = ccp_find_next_zero_bit(map, size, start);
+
+	end = index + nr;
+	if (end > size)
+		return end;
+	i = ccp_find_next_bit(map, end, index);
+	if (i < end) {
+		start = i + 1;
+		goto again;
+	}
+	return index;
+}
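+
+/* Usage sketch (editorial illustration, not part of the patch): claim two
+ * contiguous LSB slots from a 16-slot map. On failure the function returns
+ * a value larger than the map size, so success means start + nr <= size.
+ *
+ *	unsigned long map[CCP_BITMAP_SIZE(LSB_SIZE)] = {0};
+ *	unsigned long s;
+ *
+ *	s = ccp_bitmap_find_next_zero_area(map, LSB_SIZE, 0, 2);
+ *	if (s + 2 <= LSB_SIZE)
+ *		ccp_bitmap_set(map, s, 2);
+ */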
+
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+	struct ccp_device *ccp;
+	int start;
+
+	/* First look at the map for the queue */
+	if (cmd_q->lsb >= 0) {
+		start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+								 LSB_SIZE, 0,
+								 count);
+		if (start < LSB_SIZE) {
+			ccp_bitmap_set(cmd_q->lsbmap, start, count);
+			return start + cmd_q->lsb * LSB_SIZE;
+		}
+	}
+
+	/* try to get an entry from the shared blocks */
+	ccp = cmd_q->dev;
+
+	rte_spinlock_lock(&ccp->lsb_lock);
+
+	start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+						    MAX_LSB_CNT * LSB_SIZE,
+						    0, count);
+	if (start <= MAX_LSB_CNT * LSB_SIZE) {
+		ccp_bitmap_set(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+		return start * LSB_ITEM_SIZE;
+	}
+	CCP_LOG_ERR("NO LSBs available");
+
+	rte_spinlock_unlock(&ccp->lsb_lock);
+
+	return 0;
+}
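+
+/* Editorial note (not part of the patch): allocation first tries the
+ * queue's private LSB map; only when no room is left there does it fall
+ * back to the device-wide shared blocks under lsb_lock.
+ */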
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+	     unsigned int start,
+	     unsigned int count)
+{
+	int lsbno = start / LSB_SIZE;
+
+	if (!start)
+		return;
+
+	if (cmd_q->lsb == lsbno) {
+		/* An entry from the private LSB */
+		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+	} else {
+		/* From the shared LSBs */
+		struct ccp_device *ccp = cmd_q->dev;
+
+		rte_spinlock_lock(&ccp->lsb_lock);
+		ccp_bitmap_clear(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+	}
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+	int q_mask = 1 << cmd_q->id;
+	int weight = 0;
+	int j;
+
+	/* Build a bit mask of the LSBs this queue has access to.
+	 * Skip segment 0, as it has special privileges.
+	 */
+	cmd_q->lsbmask = 0;
+	status >>= LSB_REGION_WIDTH;
+	for (j = 1; j < MAX_LSB_CNT; j++) {
+		if (status & q_mask)
+			ccp_set_bit(&cmd_q->lsbmask, j);
+
+		status >>= LSB_REGION_WIDTH;
+	}
+
+	for (j = 0; j < MAX_LSB_CNT; j++)
+		if (ccp_get_bit(&cmd_q->lsbmask, j))
+			weight++;
+
+	printf("Queue %d can access %d LSB regions  of mask  %lu\n",
+	       (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+	return weight ? 0 : -EINVAL;
+}
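+
+/* Editorial worked example (not part of the patch): with LSB_REGION_WIDTH
+ * of 5, the status word carries one 5-bit access mask per LSB region.
+ * After the initial shift past privileged region 0, a queue with id 1
+ * (q_mask = 0x2) is granted region j whenever bit 1 of region j's 5-bit
+ * field is set, and each grant sets bit j of cmd_q->lsbmask.
+ */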
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+			     int lsb_cnt, int n_lsbs,
+			     unsigned long *lsb_pub)
+{
+	unsigned long qlsb = 0;
+	int bitno = 0;
+	int qlsb_wgt = 0;
+	int i, j;
+
+	/* For each queue:
+	 * If the count of potential LSBs available to a queue matches the
+	 * ordinal given to us in lsb_cnt:
+	 * Copy the mask of possible LSBs for this queue into "qlsb";
+	 * For each bit in qlsb, see if the corresponding bit in the
+	 * aggregation mask is set; if so, we have a match.
+	 *     If we have a match, clear the bit in the aggregation to
+	 *     mark it as no longer available.
+	 *     If there is no match, clear the bit in qlsb and keep looking.
+	 */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+		qlsb_wgt = 0;
+		for (j = 0; j < MAX_LSB_CNT; j++)
+			if (ccp_get_bit(&cmd_q->lsbmask, j))
+				qlsb_wgt++;
+
+		if (qlsb_wgt == lsb_cnt) {
+			qlsb = cmd_q->lsbmask;
+
+			bitno = ffs(qlsb) - 1;
+			while (bitno < MAX_LSB_CNT) {
+				if (ccp_get_bit(lsb_pub, bitno)) {
+					/* We found an available LSB
+					 * that this queue can access
+					 */
+					cmd_q->lsb = bitno;
+					ccp_clear_bit(lsb_pub, bitno);
+					break;
+				}
+				ccp_clear_bit(&qlsb, bitno);
+				bitno = ffs(qlsb) - 1;
+			}
+			if (bitno >= MAX_LSB_CNT)
+				return -EINVAL;
+			n_lsbs--;
+		}
+	}
+	return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+	unsigned long lsb_pub = 0, qlsb = 0;
+	int n_lsbs = 0;
+	int bitno;
+	int i, lsb_cnt;
+	int rc = 0;
+
+	rte_spinlock_init(&ccp->lsb_lock);
+
+	/* Create an aggregate bitmap to get a total count of available LSBs */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+	for (i = 0; i < MAX_LSB_CNT; i++)
+		if (ccp_get_bit(&lsb_pub, i))
+			n_lsbs++;
+
+	if (n_lsbs >= ccp->cmd_q_count) {
+		/* We have enough LSBS to give every queue a private LSB.
+		 * Brute force search to start with the queues that are more
+		 * constrained in LSB choice. When an LSB is privately
+		 * assigned, it is removed from the public mask.
+		 * This is an ugly N squared algorithm with some optimization.
+		 */
+		for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+		     lsb_cnt++) {
+			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+							  &lsb_pub);
+			if (rc < 0)
+				return -EINVAL;
+			n_lsbs = rc;
+		}
+	}
+
+	rc = 0;
+	/* What's left of the LSBs, according to the public mask, now become
+	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
+	 * that can't be used as a shared resource, so mark the LSB slots for
+	 * them as "in use".
+	 */
+	qlsb = lsb_pub;
+	bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	while (bitno < MAX_LSB_CNT) {
+		ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+		ccp_set_bit(&qlsb, bitno);
+		bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	}
+
+	return rc;
+}
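+
+/* Editorial scenario (not part of the patch): with 5 command queues and 7
+ * public LSB regions, every queue gets a private region (assuming the
+ * constraint search above succeeds) and the 2 leftover regions remain
+ * shared. With 5 queues but only 3 regions, n_lsbs < cmd_q_count, so no
+ * private assignment is made and all 3 regions are shared.
+ */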
+
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+	int i;
+	uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+	uint64_t status;
+	struct ccp_queue *cmd_q;
+	const struct rte_memzone *q_mz;
+	void *vaddr;
+
+	if (dev == NULL)
+		return -1;
+
+	dev->id = ccp_dev_id++;
+	dev->qidx = 0;
+	vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+	if (type == CCP_VERSION_5B) {
+		CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+		CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+		for (i = 0; i < 12; i++) {
+			CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+				      CCP_READ_REG(vaddr, TRNG_OUT_REG));
+		}
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+		CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+	}
+	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+	/* Copy the private LSB mask to the public registers */
+	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+	status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+	status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo);
+
+	dev->cmd_q_count = 0;
+	/* Find available queues */
+	qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+	for (i = 0; i < MAX_HW_QUEUES; i++) {
+		if (!(qmr & (1 << i)))
+			continue;
+		cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+		cmd_q->dev = dev;
+		cmd_q->id = i;
+		cmd_q->qidx = 0;
+		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+		cmd_q->reg_base = (uint8_t *)vaddr +
+			CMD_Q_STATUS_INCR * (i + 1);
+
+		/* CCP queue memory */
+		snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+			 "%s_%d_%s_%d_%s",
+			 "ccp_dev",
+			 (int)dev->id, "queue",
+			 (int)cmd_q->id, "mem");
+		q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+						  cmd_q->qsize, SOCKET_ID_ANY);
+		cmd_q->qbase_addr = (void *)q_mz->addr;
+		cmd_q->qbase_desc = (void *)q_mz->addr;
+		cmd_q->qbase_phys_addr =  q_mz->phys_addr;
+
+		cmd_q->qcontrol = 0;
+		/* init control reg to zero */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* Disable the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+		/* Clear the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+			      ALL_INTERRUPTS);
+
+		/* Configure size of each virtual queue accessible to host */
+		cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+		dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+
+		dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+		cmd_q->qcontrol |= (dma_addr_hi << 16);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* create LSB Mask map */
+		if (ccp_find_lsb_regions(cmd_q, status))
+			CCP_LOG_ERR("queue doesn't have lsb regions");
+		cmd_q->lsb = -1;
+
+		rte_atomic64_init(&cmd_q->free_slots);
+		rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+		/* one slot is left unused as a barrier between head and tail */
+	}
+
+	if (ccp_assign_lsbs(dev))
+		CCP_LOG_ERR("Unable to assign lsb region");
+
+	/* pre-allocate LSB slots */
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->cmd_q[i].sb_key =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_iv =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_sha =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+		dev->cmd_q[i].sb_hmac =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+	}
+
+	TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+	return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+	if (dev == NULL)
+		return;
+
+	TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+	      const struct rte_pci_id *ccp_id,
+	      int *type)
+{
+	char filename[PATH_MAX];
+	const struct rte_pci_id *id;
+	uint16_t vendor, device_id;
+	int i;
+	unsigned long tmp;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	vendor = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	device_id = (uint16_t)tmp;
+
+	for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+		if (vendor == id->vendor_id &&
+		    device_id == id->device_id) {
+			*type = i;
+			return 1; /* Matched device */
+		}
+	}
+	return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+		 uint8_t bus, uint8_t devid,
+		 uint8_t function, int ccp_type)
+{
+	struct ccp_device *ccp_dev = NULL;
+	struct rte_pci_device *pci;
+	char filename[PATH_MAX];
+	unsigned long tmp;
+	int uio_fd = -1, i, uio_num;
+	char uio_devname[PATH_MAX];
+	void *map_addr;
+
+	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+			      RTE_CACHE_LINE_SIZE);
+	if (ccp_dev == NULL)
+		goto fail;
+	pci = &(ccp_dev->pci);
+
+	pci->addr.domain = domain;
+	pci->addr.bus = bus;
+	pci->addr.devid = devid;
+	pci->addr.function = function;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.vendor_id = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.device_id = (uint16_t)tmp;
+
+	/* get subsystem_vendor id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+	/* get subsystem_device id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_device",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_device_id = (uint16_t)tmp;
+
+	/* get class_id */
+	snprintf(filename, sizeof(filename), "%s/class",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	/* the lower 24 bits are valid: class, subclass, program interface */
+	pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+	/* parse resources */
+	snprintf(filename, sizeof(filename), "%s/resource", dirname);
+	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+		goto fail;
+
+	uio_num = ccp_find_uio_devname(dirname);
+	if (uio_num < 0) {
+		/*
+		 * It may take time for uio device to appear,
+		 * wait  here and try again
+		 */
+		usleep(100000);
+		uio_num = ccp_find_uio_devname(dirname);
+		if (uio_num < 0)
+			goto fail;
+	}
+	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+	if (uio_fd < 0)
+		goto fail;
+	if (flock(uio_fd, LOCK_EX | LOCK_NB))
+		goto fail;
+
+	/* Map the PCI memory resource of device */
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+		char devname[PATH_MAX];
+		int res_fd;
+
+		if (pci->mem_resource[i].phys_addr == 0)
+			continue;
+		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+		res_fd = open(devname, O_RDWR);
+		if (res_fd < 0)
+			goto fail;
+		map_addr = mmap(NULL, pci->mem_resource[i].len,
+				PROT_READ | PROT_WRITE,
+				MAP_SHARED, res_fd, 0);
+		if (map_addr == MAP_FAILED)
+			goto fail;
+
+		pci->mem_resource[i].addr = map_addr;
+	}
+
+	/* device is valid, add in list */
+	if (ccp_add_device(ccp_dev, ccp_type)) {
+		ccp_remove_device(ccp_dev);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	CCP_LOG_ERR("CCP Device probe failed");
+	if (uio_fd > 0)
+		close(uio_fd);
+	if (ccp_dev)
+		rte_free(ccp_dev);
+	return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+	int dev_cnt = 0;
+	int ccp_type = 0;
+	struct dirent *d;
+	DIR *dir;
+	int ret = 0;
+	int module_idx = 0;
+	uint16_t domain;
+	uint8_t bus, devid, function;
+	char dirname[PATH_MAX];
+
+	module_idx = ccp_check_pci_uio_module();
+	if (module_idx < 0)
+		return -1;
+
+	TAILQ_INIT(&ccp_list);
+	dir = opendir(SYSFS_PCI_DEVICES);
+	if (dir == NULL)
+		return -1;
+	while ((d = readdir(dir)) != NULL) {
+		if (d->d_name[0] == '.')
+			continue;
+		if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+					&domain, &bus, &devid, &function) != 0)
+			continue;
+		snprintf(dirname, sizeof(dirname), "%s/%s",
+			     SYSFS_PCI_DEVICES, d->d_name);
+		if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+			printf("CCP : Detected CCP device with ID = 0x%x\n",
+			       ccp_id[ccp_type].device_id);
+			ret = ccp_probe_device(dirname, domain, bus, devid,
+					       function, ccp_type);
+			if (ret == 0)
+				dev_cnt++;
+		}
+	}
+	closedir(dir);
+	return dev_cnt;
+}
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 0000000..fe05bf0
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,310 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/**< CCP specific */
+#define MAX_HW_QUEUES                   5
+
+/**< CCP Register Mappings */
+#define Q_MASK_REG                      0x000
+#define TRNG_OUT_REG                    0x00c
+
+/* CCP Version 5 Specifics */
+#define CMD_QUEUE_MASK_OFFSET		0x00
+#define	CMD_QUEUE_PRIO_OFFSET		0x04
+#define CMD_REQID_CONFIG_OFFSET		0x08
+#define	CMD_CMD_TIMEOUT_OFFSET		0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET	0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET	0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET	0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET	0x24
+
+#define CMD_Q_CONTROL_BASE		0x0000
+#define CMD_Q_TAIL_LO_BASE		0x0004
+#define CMD_Q_HEAD_LO_BASE		0x0008
+#define CMD_Q_INT_ENABLE_BASE		0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE	0x0010
+
+#define CMD_Q_STATUS_BASE		0x0100
+#define CMD_Q_INT_STATUS_BASE		0x0104
+
+#define	CMD_CONFIG_0_OFFSET		0x6000
+#define	CMD_TRNG_CTL_OFFSET		0x6008
+#define	CMD_AES_MASK_OFFSET		0x6010
+#define	CMD_CLK_GATE_CTL_OFFSET		0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR		0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN			0x1
+#define CMD_Q_SIZE			0x1F
+#define CMD_Q_SHIFT			3
+#define COMMANDS_PER_QUEUE		2048
+
+#define QUEUE_SIZE_VAL                  ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+					 CMD_Q_SIZE)
+#define Q_DESC_SIZE                     sizeof(struct ccp_desc)
+#define Q_SIZE(n)                       (COMMANDS_PER_QUEUE*(n))
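+
+/* Editorial worked example (not part of the patch): COMMANDS_PER_QUEUE is
+ * 2048 = 1 << 11, so ffs(2048) = 12 and QUEUE_SIZE_VAL = (12 - 2) & 0x1F
+ * = 10, which suggests the hardware encodes a queue of 2^(n + 1)
+ * descriptors as the value n.
+ */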
+
+#define INT_COMPLETION                  0x1
+#define INT_ERROR                       0x2
+#define INT_QUEUE_STOPPED               0x4
+#define ALL_INTERRUPTS                  (INT_COMPLETION| \
+					 INT_ERROR| \
+					 INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH                5
+#define MAX_LSB_CNT                     8
+
+#define LSB_SIZE                        16
+#define LSB_ITEM_SIZE                   32
+#define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
+
+/* bitmap */
+enum {
+	BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b)  ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+	CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+	(~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+	(~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
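+
+/* Editorial example (not part of the patch), assuming 64-bit longs:
+ * bit 70 lives in word WORD_OFFSET(70) = 1 at position BIT_OFFSET(70) = 6,
+ * and CCP_BITMAP_SIZE(90) = 2 words suffice to track 90 bits.
+ */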
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+				     uint32_t value)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+	ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+	ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+	CCP_VERSION_5A = 0,
+	CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+	struct ccp_device *dev;
+	char memz_name[RTE_MEMZONE_NAMESIZE];
+
+	rte_atomic64_t free_slots;
+	/**< available free slots updated from enq/deq calls */
+
+	/* Queue identifier */
+	uint64_t id;	/**< queue id */
+	uint64_t qidx;	/**< queue index */
+	uint64_t qsize;	/**< queue size */
+
+	/* Queue address */
+	struct ccp_desc *qbase_desc;
+	void *qbase_addr;
+	phys_addr_t qbase_phys_addr;
+	/**< queue-page registers addr */
+	void *reg_base;
+
+	uint32_t qcontrol;
+	/**< queue ctrl reg */
+
+	int lsb;
+	/**< lsb region assigned to queue */
+	unsigned long lsbmask;
+	/**< lsb regions queue can access */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+	/**< all lsb resources which queue is using */
+	uint32_t sb_key;
+	/**< lsb assigned for queue */
+	uint32_t sb_iv;
+	/**< lsb assigned for iv */
+	uint32_t sb_sha;
+	/**< lsb assigned for sha ctx */
+	uint32_t sb_hmac;
+	/**< lsb assigned for hmac ctx */
+} __rte_cache_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+	TAILQ_ENTRY(ccp_device) next;
+	int id;
+	/**< ccp dev id on platform */
+	struct ccp_queue cmd_q[MAX_HW_QUEUES];
+	/**< ccp queue */
+	int cmd_q_count;
+	/**< no. of ccp Queues */
+	struct rte_pci_device pci;
+	/**< ccp pci identifier */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+	/**< shared lsb mask of ccp */
+	rte_spinlock_t lsb_lock;
+	/**< protection for shared lsb region allocation */
+	int qidx;
+	/**< current queue index */
+} __rte_cache_aligned;
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+	uint32_t soc:1;
+	uint32_t ioc:1;
+	uint32_t rsvd1:1;
+	uint32_t init:1;
+	uint32_t eom:1;
+	uint32_t function:15;
+	uint32_t engine:4;
+	uint32_t prot:1;
+	uint32_t rsvd2:7;
+};
+
+struct dword3 {
+	uint32_t src_hi:16;
+	uint32_t src_mem:2;
+	uint32_t lsb_cxt_id:8;
+	uint32_t rsvd1:5;
+	uint32_t fixed:1;
+};
+
+union dword4 {
+	uint32_t dst_lo;	/* NON-SHA */
+	uint32_t sha_len_lo;	/* SHA */
+};
+
+union dword5 {
+	struct {
+		uint32_t dst_hi:16;
+		uint32_t dst_mem:2;
+		uint32_t rsvd1:13;
+		uint32_t fixed:1;
+	}
+	fields;
+	uint32_t sha_len_hi;
+};
+
+struct dword7 {
+	uint32_t key_hi:16;
+	uint32_t key_mem:2;
+	uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+	struct dword0 dw0;
+	uint32_t length;
+	uint32_t src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	uint32_t key_lo;
+	struct dword7 dw7;
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+	return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+	return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
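+
+/* Editorial example (not part of the patch): for addr = 0x123456789,
+ * low32_value(addr) is 0x23456789 and high32_value(addr) is 0x0001; only
+ * 16 high bits are kept, matching the 16-bit *_hi descriptor fields above.
+ */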
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 0000000..ddf4b49
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,262 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+	"igb_uio",
+	"uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+	FILE *fp;
+	int i;
+	char buf[BUFSIZ];
+
+	fp = fopen(PROC_MODULES, "r");
+	if (fp == NULL)
+		return -1;
+	i = 0;
+	while (uio_module_names[i] != NULL) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+				     strlen(uio_module_names[i])))
+				return i;
+		}
+		i++;
+		rewind(fp);
+	}
+	printf("Insert igb_uio or uio_pci_generic kernel module(s)");
+	return -1;/* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			  uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+	/* first split on ':' */
+	union splitaddr {
+		struct {
+			char *domain;
+			char *bus;
+			char *devid;
+			char *function;
+		};
+		char *str[PCI_FMT_NVAL];
+		/* last element-separator is "." not ":" */
+	} splitaddr;
+
+	char *buf_copy = strndup(buf, bufsize);
+
+	if (buf_copy == NULL)
+		return -1;
+
+	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+			!= PCI_FMT_NVAL - 1)
+		goto error;
+	/* final split is on '.' between devid and function */
+	splitaddr.function = strchr(splitaddr.devid, '.');
+	if (splitaddr.function == NULL)
+		goto error;
+	*splitaddr.function++ = '\0';
+
+	/* now convert to int values */
+	errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+	*bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+	*devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+	*function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+	if (errno != 0)
+		goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+	return 0;
+error:
+	free(buf_copy);
+	return -1;
+}
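+
+/* Usage sketch (editorial illustration, not part of the patch): parsing
+ * "0000:03:00.2" with bufsize 13 yields domain = 0, bus = 0x03,
+ * devid = 0x00 and function = 2, and the call returns 0 on success.
+ */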
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+	FILE *f;
+	char buf[BUFSIZ];
+	char *end = NULL;
+
+	f = fopen(filename, "r");
+	if (f == NULL)
+		return -1;
+	if (fgets(buf, sizeof(buf), f) == NULL) {
+		fclose(f);
+		return -1;
+	}
+	*val = strtoul(buf, &end, 0);
+	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+		fclose(f);
+		return -1;
+	}
+	fclose(f);
+	return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO         0x00000100
+#define IORESOURCE_MEM        0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+				 uint64_t *end_addr, uint64_t *flags)
+{
+	union pci_resource_info {
+		struct {
+			char *phys_addr;
+			char *end_addr;
+			char *flags;
+		};
+		char *ptrs[PCI_RESOURCE_FMT_NVAL];
+	} res_info;
+
+	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+		return -1;
+	errno = 0;
+	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+	*end_addr = strtoull(res_info.end_addr, NULL, 16);
+	*flags = strtoull(res_info.flags, NULL, 16);
+	if (errno != 0)
+		return -1;
+
+	return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+	FILE *fp;
+	char buf[BUFSIZ];
+	int i;
+	uint64_t phys_addr, end_addr, flags;
+
+	fp = fopen(filename, "r");
+	if (fp == NULL)
+		return -1;
+
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+		if (fgets(buf, sizeof(buf), fp) == NULL)
+			goto error;
+		if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+				&phys_addr, &end_addr, &flags) < 0)
+			goto error;
+
+		if (flags & IORESOURCE_MEM) {
+			dev->mem_resource[i].phys_addr = phys_addr;
+			dev->mem_resource[i].len = end_addr - phys_addr + 1;
+			/* not mapped for now */
+			dev->mem_resource[i].addr = NULL;
+		}
+	}
+	fclose(fp);
+	return 0;
+
+error:
+	fclose(fp);
+	return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+	DIR *dir;
+	struct dirent *e;
+	char dirname_uio[PATH_MAX];
+	unsigned int uio_num;
+	int ret = -1;
+
+	/* depending on kernel version, uio can be located in uio/uioX
+	 * or uio:uioX
+	 */
+	snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+	dir = opendir(dirname_uio);
+	if (dir == NULL) {
+		/* retry with the parent directory; layout differs by kernel version */
+		dir = opendir(dirname);
+		if (dir == NULL)
+			return -1;
+	}
+
+	/* take the first file starting with "uio" */
+	while ((e = readdir(dir)) != NULL) {
+		/* format could be uio%d ...*/
+		int shortprefix_len = sizeof("uio") - 1;
+		/* ... or uio:uio%d */
+		int longprefix_len = sizeof("uio:uio") - 1;
+		char *endptr;
+
+		if (strncmp(e->d_name, "uio", 3) != 0)
+			continue;
+
+		/* first try uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+
+		/* then try uio:uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+	}
+	closedir(dir);
+	return ret;
+}
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 0000000..a4c09c8
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,53 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			      uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+				 struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+#endif /* _CCP_PCI_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 0000000..bc4120b
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,55 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+struct rte_cryptodev_ops ccp_ops = {
+		.dev_configure		= NULL,
+		.dev_start		= NULL,
+		.dev_stop		= NULL,
+		.dev_close		= NULL,
+
+		.stats_get		= NULL,
+		.stats_reset		= NULL,
+
+		.dev_infos_get		= NULL,
+
+		.queue_pair_setup	= NULL,
+		.queue_pair_release	= NULL,
+		.queue_pair_start	= NULL,
+		.queue_pair_stop	= NULL,
+		.queue_pair_count	= NULL,
+
+		.session_get_size	= NULL,
+		.session_configure	= NULL,
+		.session_clear		= NULL,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 0000000..f5b6061
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,82 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev.h>
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/**< Maximum queue pairs supported by CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS	1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
+	unsigned int max_nb_sessions;	/**< Max number of sessions */
+	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+};
+
+/**< device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 6fa14bd..1fdbe1f 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -28,23 +28,170 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <rte_bus_pci.h>
 #include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
 
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global static flag indicating whether the CCP PMD has already been initialized.
+ */
+static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
+		      struct rte_crypto_op **ops __rte_unused,
+		      uint16_t nb_ops __rte_unused)
+{
+	uint16_t enq_cnt = 0;
+
+	return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
+		      struct rte_crypto_op **ops __rte_unused,
+		      uint16_t nb_ops __rte_unused)
+{
+	uint16_t nb_dequeued = 0;
+
+	return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+	},
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+	},
+	{.device_id = 0},
+};
+
 /** Remove ccp pmd */
 static int
-cryptodev_ccp_remove(struct rte_vdev_device *dev __rte_unused)
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
 {
+	const char *name;
+
+	ccp_pmd_init_done = 0;
+	name = rte_vdev_device_name(dev);
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+			name, rte_socket_id());
+
 	return 0;
 }
 
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+		     struct rte_vdev_device *vdev,
+		     struct rte_cryptodev_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct ccp_private *internals;
+	uint8_t cryptodev_cnt = 0;
+
+	if (init_params->name[0] == '\0')
+		snprintf(init_params->name, sizeof(init_params->name),
+				"%s", name);
+
+	dev = rte_cryptodev_pmd_create(init_params->name,
+				       &vdev->device,
+				       init_params);
+	if (dev == NULL) {
+		CCP_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+	if (cryptodev_cnt == 0) {
+		CCP_LOG_ERR("failed to detect CCP crypto device");
+		goto init_error;
+	}
+
+	printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+	dev->driver_id = ccp_cryptodev_driver_id;
+
+	/* register rx/tx burst functions for data path */
+	dev->dev_ops = ccp_pmd_ops;
+	dev->enqueue_burst = ccp_pmd_enqueue_burst;
+	dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+	internals = dev->data->dev_private;
+
+	internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+	internals->crypto_num_dev = cryptodev_cnt;
+
+	return 0;
+
+init_error:
+	CCP_LOG_ERR("driver %s: cryptodev_ccp_create failed",
+		    init_params->name);
+	cryptodev_ccp_remove(vdev);
+
+	return -EFAULT;
+}
+
 /** Probe ccp pmd */
 static int
-cryptodev_ccp_probe(struct rte_vdev_device *vdev __rte_unused)
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
 {
+	int rc = 0;
+	const char *name;
+	struct rte_cryptodev_pmd_init_params init_params = {
+		"",
+		sizeof(struct ccp_private),
+		rte_socket_id(),
+		CCP_PMD_MAX_QUEUE_PAIRS,
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+	};
+	const char *input_args;
+
+	if (ccp_pmd_init_done) {
+		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+		return -EFAULT;
+	}
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	input_args = rte_vdev_device_args(vdev);
+	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+	init_params.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	rc = cryptodev_ccp_create(name, vdev, &init_params);
+	if (rc)
+		return rc;
+	ccp_pmd_init_done = 1;
 	return 0;
 }
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
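
The probe/remove pair above is eventually hooked onto the EAL vdev bus with
a driver registration macro. A minimal sketch of that wiring, assuming the
driver struct name (the registration hunk itself is not shown in this part
of the series):

	static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
		.probe = cryptodev_ccp_probe,
		.remove = cryptodev_ccp_remove
	};

	RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
	RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
		"max_nb_queue_pairs=<int> max_nb_sessions=<int> socket_id=<int>");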

* [PATCH v2 03/20] crypto/ccp: add basic pmd ops support for start, stop, config etc
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 02/20] crypto/ccp: add ccp device initialization and remove Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-08 17:21     ` De Lara Guarch, Pablo
  2018-01-05  9:39   ` [PATCH v2 04/20] crypto/ccp: add session related crypto pmd ops support Ravi Kumar
                     ` (19 subsequent siblings)
  21 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_dev.c         |  9 ++++++
 drivers/crypto/ccp/ccp_dev.h         |  9 ++++++
 drivers/crypto/ccp/ccp_pmd_ops.c     | 61 +++++++++++++++++++++++++++++++++---
 drivers/crypto/ccp/ccp_pmd_private.h | 43 +++++++++++++++++++++++++
 4 files changed, 117 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 5af2b49..57bccf4 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -52,6 +52,15 @@
 struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
 static int ccp_dev_id;
 
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+	struct ccp_private *priv = dev->data->dev_private;
+
+	priv->last_dev = TAILQ_FIRST(&ccp_list);
+	return 0;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index fe05bf0..b321530 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -104,6 +104,10 @@
 #define LSB_ITEM_SIZE                   32
 #define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
 
+/* General CCP Defines */
+
+#define CCP_SB_BYTES                    32
+
 /* bitmap */
 enum {
 	BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
@@ -299,6 +303,11 @@ high32_value(unsigned long addr)
 	return ((uint64_t)addr >> 32) & 0xffffffff;
 }
 
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
 /**
  * Detect ccp platform and initialize all ccp devices
  *
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index bc4120b..b6f8c48 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -28,18 +28,69 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <string.h>
+
+#include <rte_common.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+
+static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+	       struct rte_cryptodev_config *config __rte_unused)
+{
+	return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+	return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+	return 0;
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+		 struct rte_cryptodev_info *dev_info)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = ccp_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
 
 struct rte_cryptodev_ops ccp_ops = {
-		.dev_configure		= NULL,
-		.dev_start		= NULL,
-		.dev_stop		= NULL,
-		.dev_close		= NULL,
+		.dev_configure		= ccp_pmd_config,
+		.dev_start		= ccp_pmd_start,
+		.dev_stop		= ccp_pmd_stop,
+		.dev_close		= ccp_pmd_close,
 
 		.stats_get		= NULL,
 		.stats_reset		= NULL,
 
-		.dev_infos_get		= NULL,
+		.dev_infos_get		= ccp_pmd_info_get,
 
 		.queue_pair_setup	= NULL,
 		.queue_pair_release	= NULL,
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
index f5b6061..d2283e8 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -60,13 +60,56 @@
 #define CCP_NB_MAX_DESCRIPTORS 1024
 #define CCP_MAX_BURST 64
 
+#include "ccp_dev.h"
+
 /* private data structure for each CCP crypto device */
 struct ccp_private {
 	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
 	unsigned int max_nb_sessions;	/**< Max number of sessions */
 	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+	struct ccp_device *last_dev;	/**< Last working crypto device */
 };
 
+/* CCP batch info */
+struct ccp_batch_info {
+	struct rte_crypto_op *op[CCP_MAX_BURST];
+	/**< optable populated at enque time from app*/
+	int op_idx;
+	struct ccp_queue *cmd_q;
+	uint16_t opcnt;
+	/**< no. of crypto ops in batch*/
+	int desccnt;
+	/**< no. of ccp queue descriptors*/
+	uint32_t head_offset;
+	/**< ccp queue head tail offsets time of enqueue*/
+	uint32_t tail_offset;
+	uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+	phys_addr_t lsb_buf_phys;
+	/**< LSB intermediate buf for passthru */
+	int lsb_buf_idx;
+} __rte_cache_aligned;
+
+/**< CCP crypto queue pair */
+struct ccp_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing process packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_mempool *batch_mp;
+	/**< Session Mempool for batch info */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+	struct ccp_batch_info *b_info;
+	/**< Store ops pulled out of queue */
+	struct rte_cryptodev *dev;
+	/**< rte crypto device to which this qp belongs */
+} __rte_cache_aligned;
+
+
 /**< device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *ccp_pmd_ops;
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
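
The newly wired ops are reached through the generic cryptodev API; a minimal
sketch of the application-side calls that land in them (dev_id assumed to be
a valid device id):

	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);	/* -> ccp_pmd_info_get() */
	/* info.capabilities still only holds the end-of-list marker here */

	rte_cryptodev_start(dev_id);	/* -> ccp_pmd_start() -> ccp_dev_start() */
	rte_cryptodev_stop(dev_id);	/* -> ccp_pmd_stop(), a no-op so far */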

* [PATCH v2 04/20] crypto/ccp: add session related crypto pmd ops support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 02/20] crypto/ccp: add ccp device initialization and remove Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 03/20] crypto/ccp: add basic pmd ops support for start, stop, config etc Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 05/20] crypto/ccp: add queue pair related " Ravi Kumar
                     ` (18 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/Makefile      |   3 +-
 drivers/crypto/ccp/ccp_crypto.c  | 230 +++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  | 267 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h     | 129 +++++++++++++++++++
 drivers/crypto/ccp/ccp_pmd_ops.c |  61 ++++++++-
 5 files changed, 686 insertions(+), 4 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp_crypto.c
 create mode 100644 drivers/crypto/ccp/ccp_crypto.h

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 5e58c31..5241465 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -51,8 +51,9 @@ EXPORT_MAP := rte_pmd_ccp_version.map
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 0000000..80abcdc
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,230 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+	if (xform == NULL)
+		return res;
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next == NULL)
+			return CCP_CMD_AUTH;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return CCP_CMD_HASH_CIPHER;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next == NULL)
+			return CCP_CMD_CIPHER;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return CCP_CMD_CIPHER_HASH;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+		return CCP_CMD_COMBINED;
+	return res;
+}
+
+/* configure session */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+			     const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+	cipher_xform = &xform->cipher;
+
+	/* set cipher direction */
+	if (cipher_xform->op ==  RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+	else
+		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+	/* set cipher key */
+	sess->cipher.key_length = cipher_xform->key.length;
+	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+		   cipher_xform->key.length);
+
+	/* set iv parameters */
+	sess->iv.offset = cipher_xform->iv.offset;
+	sess->iv.length = cipher_xform->iv.length;
+
+	switch (cipher_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo");
+		return -1;
+	}
+
+
+	switch (sess->cipher.engine) {
+	default:
+		CCP_LOG_ERR("Invalid CCP Engine");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_auth_xform *auth_xform = NULL;
+
+	auth_xform = &xform->auth;
+
+	sess->auth.digest_length = auth_xform->digest_length;
+	if (auth_xform->op ==  RTE_CRYPTO_AUTH_OP_GENERATE)
+		sess->auth.op = CCP_AUTH_OP_GENERATE;
+	else
+		sess->auth.op = CCP_AUTH_OP_VERIFY;
+	switch (auth_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported hash algo");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_aead_xform *aead_xform = NULL;
+
+	aead_xform = &xform->aead;
+
+	sess->cipher.key_length = aead_xform->key.length;
+	rte_memcpy(sess->cipher.key, aead_xform->key.data,
+		   aead_xform->key.length);
+
+	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+		sess->auth.op = CCP_AUTH_OP_GENERATE;
+	} else {
+		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+		sess->auth.op = CCP_AUTH_OP_VERIFY;
+	}
+	sess->auth.aad_length = aead_xform->aad_length;
+	sess->auth.digest_length = aead_xform->digest_length;
+
+	/* set iv parameters */
+	sess->iv.offset = aead_xform->iv.offset;
+	sess->iv.length = aead_xform->iv.length;
+
+	switch (aead_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *aead_xform = NULL;
+	int ret = 0;
+
+	sess->cmd_id = ccp_get_cmd_id(xform);
+
+	switch (sess->cmd_id) {
+	case CCP_CMD_CIPHER:
+		cipher_xform = xform;
+		break;
+	case CCP_CMD_AUTH:
+		auth_xform = xform;
+		break;
+	case CCP_CMD_CIPHER_HASH:
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case CCP_CMD_HASH_CIPHER:
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	case CCP_CMD_COMBINED:
+		aead_xform = xform;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+		return -1;
+	}
+
+	/* Default IV length = 0 */
+	sess->iv.length = 0;
+	if (cipher_xform) {
+		ret = ccp_configure_session_cipher(sess, cipher_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+			return ret;
+		}
+	}
+	if (auth_xform) {
+		ret = ccp_configure_session_auth(sess, auth_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported auth parameters");
+			return ret;
+		}
+	}
+	if (aead_xform) {
+		ret = ccp_configure_session_aead(sess, aead_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported aead parameters");
+			return ret;
+		}
+	}
+	return ret;
+}
+
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 0000000..346d5ee
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,267 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include "ccp_dev.h"
+
+#define CCP_SHA3_CTX_SIZE 200
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+	CCP_AES_MODE_ECB = 0,
+	CCP_AES_MODE_CBC,
+	CCP_AES_MODE_OFB,
+	CCP_AES_MODE_CFB,
+	CCP_AES_MODE_CTR,
+	CCP_AES_MODE_CMAC,
+	CCP_AES_MODE_GHASH,
+	CCP_AES_MODE_GCTR,
+	CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+	CCP_AES_MODE_GHASH_AAD = 0,
+	CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+	CCP_AES_TYPE_128 = 0,
+	CCP_AES_TYPE_192,
+	CCP_AES_TYPE_256,
+	CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+	CCP_DES_MODE_ECB = 0, /* Not supported */
+	CCP_DES_MODE_CBC,
+	CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+	CCP_DES_TYPE_128 = 0,	/* 112 + 16 parity */
+	CCP_DES_TYPE_192,	/* 168 + 24 parity */
+	CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ */
+enum ccp_sha_type {
+	CCP_SHA_TYPE_1 = 1,
+	CCP_SHA_TYPE_224,
+	CCP_SHA_TYPE_256,
+	CCP_SHA_TYPE_384,
+	CCP_SHA_TYPE_512,
+	CCP_SHA_TYPE_RSVD1,
+	CCP_SHA_TYPE_RSVD2,
+	CCP_SHA3_TYPE_224,
+	CCP_SHA3_TYPE_256,
+	CCP_SHA3_TYPE_384,
+	CCP_SHA3_TYPE_512,
+	CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+	CCP_CIPHER_ALGO_AES_CBC = 0,
+	CCP_CIPHER_ALGO_AES_ECB,
+	CCP_CIPHER_ALGO_AES_CTR,
+	CCP_CIPHER_ALGO_AES_GCM,
+	CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+	CCP_CIPHER_DIR_DECRYPT = 0,
+	CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+	CCP_AUTH_ALGO_SHA1 = 0,
+	CCP_AUTH_ALGO_SHA1_HMAC,
+	CCP_AUTH_ALGO_SHA224,
+	CCP_AUTH_ALGO_SHA224_HMAC,
+	CCP_AUTH_ALGO_SHA3_224,
+	CCP_AUTH_ALGO_SHA3_224_HMAC,
+	CCP_AUTH_ALGO_SHA256,
+	CCP_AUTH_ALGO_SHA256_HMAC,
+	CCP_AUTH_ALGO_SHA3_256,
+	CCP_AUTH_ALGO_SHA3_256_HMAC,
+	CCP_AUTH_ALGO_SHA384,
+	CCP_AUTH_ALGO_SHA384_HMAC,
+	CCP_AUTH_ALGO_SHA3_384,
+	CCP_AUTH_ALGO_SHA3_384_HMAC,
+	CCP_AUTH_ALGO_SHA512,
+	CCP_AUTH_ALGO_SHA512_HMAC,
+	CCP_AUTH_ALGO_SHA3_512,
+	CCP_AUTH_ALGO_SHA3_512_HMAC,
+	CCP_AUTH_ALGO_AES_CMAC,
+	CCP_AUTH_ALGO_AES_GCM,
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	CCP_AUTH_ALGO_MD5_HMAC,
+#endif
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+	CCP_AUTH_OP_GENERATE = 0,
+	CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+	enum ccp_cmd_order cmd_id;
+	/**< chain order mode */
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	/**< IV parameters */
+	struct {
+		enum ccp_cipher_algo  algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+			enum ccp_des_mode des_mode;
+		} um;
+		union {
+			enum ccp_aes_type aes_type;
+			enum ccp_des_type des_type;
+		} ut;
+		enum ccp_cipher_dir dir;
+		uint64_t key_length;
+		/**< max cipher key size 256 bits */
+		uint8_t key[32];
+		/**< key in CCP format */
+		uint8_t key_ccp[32];
+		phys_addr_t key_phys;
+		/**< AES-CTR: nonce(4) + iv(8) + counter(4) */
+		uint8_t nonce[32];
+		phys_addr_t nonce_phys;
+	} cipher;
+	/**< Cipher Parameters */
+
+	struct {
+		enum ccp_hash_algo algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+		} um;
+		union {
+			enum ccp_sha_type sha_type;
+			enum ccp_aes_type aes_type;
+		} ut;
+		enum ccp_hash_op op;
+		uint64_t key_length;
+		/**< max hash key size 144 bytes (struct capabilties) */
+		uint8_t key[144];
+		/**< max be key size of AES is 32*/
+		uint8_t key_ccp[32];
+		phys_addr_t key_phys;
+		uint64_t digest_length;
+		void *ctx;
+		int ctx_len;
+		int offset;
+		int block_size;
+		/**< Buffer to store  Software generated precomute values*/
+		/**< For HMAC H(ipad ^ key) and H(opad ^ key) */
+		/**< For CMAC K1 IV and K2 IV*/
+		uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+		/**< SHA3 initial ctx all zeros*/
+		uint8_t sha3_ctx[200];
+		int aad_length;
+	} auth;
+	/**< Authentication Parameters */
+	enum rte_crypto_aead_algorithm aead_algo;
+	/**< AEAD Algorithm */
+
+	uint32_t reserved;
+} __rte_cache_aligned;
+
+extern uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_qp;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @return 0 on success otherwise -1
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+			       const struct rte_crypto_sym_xform *xform);
+
+#endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index b321530..a16ba81 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -225,6 +225,123 @@ struct ccp_device {
 	/**< current queue index */
 } __rte_cache_aligned;
 
+/**< CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+	CCP_ENGINE_AES = 0,
+	CCP_ENGINE_XTS_AES_128,
+	CCP_ENGINE_3DES,
+	CCP_ENGINE_SHA,
+	CCP_ENGINE_RSA,
+	CCP_ENGINE_PASSTHRU,
+	CCP_ENGINE_ZLIB_DECOMPRESS,
+	CCP_ENGINE_ECC,
+	CCP_ENGINE__LAST,
+};
+
+/* Passthru engine */
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+	CCP_PASSTHRU_BITWISE_NOOP = 0,
+	CCP_PASSTHRU_BITWISE_AND,
+	CCP_PASSTHRU_BITWISE_OR,
+	CCP_PASSTHRU_BITWISE_XOR,
+	CCP_PASSTHRU_BITWISE_MASK,
+	CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+	CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+	CCP_PASSTHRU_BYTESWAP_32BIT,
+	CCP_PASSTHRU_BYTESWAP_256BIT,
+	CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+	phys_addr_t src_addr;
+	phys_addr_t dest_addr;
+	enum ccp_passthru_bitwise bit_mod;
+	enum ccp_passthru_byteswap byte_swap;
+	int len;
+	int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} aes;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} des;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t rsvd:5;
+		uint16_t type:2;
+	} aes_xts;
+	struct {
+		uint16_t rsvd1:10;
+		uint16_t type:4;
+		uint16_t rsvd2:1;
+	} sha;
+	struct {
+		uint16_t mode:3;
+		uint16_t size:12;
+	} rsa;
+	struct {
+		uint16_t byteswap:2;
+		uint16_t bitwise:3;
+		uint16_t reflect:2;
+		uint16_t rsvd:8;
+	} pt;
+	struct  {
+		uint16_t rsvd:13;
+	} zlib;
+	struct {
+		uint16_t size:10;
+		uint16_t type:2;
+		uint16_t mode:3;
+	} ecc;
+	uint16_t raw;
+};
+
+
 /**
  * descriptor for version 5 CPP commands
  * 8 32-bit words:
@@ -291,6 +408,18 @@ struct ccp_desc {
 	struct dword7 dw7;
 };
 
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+	CCP_CMD_CIPHER = 0,
+	CCP_CMD_AUTH,
+	CCP_CMD_CIPHER_HASH,
+	CCP_CMD_HASH_CIPHER,
+	CCP_CMD_COMBINED,
+	CCP_CMD_NOT_SUPPORTED,
+};
+
 static inline uint32_t
 low32_value(unsigned long addr)
 {
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index b6f8c48..ad0a670 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -36,6 +36,7 @@
 
 #include "ccp_pmd_private.h"
 #include "ccp_dev.h"
+#include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
@@ -81,6 +82,60 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
 	}
 }
 
+static unsigned
+ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct ccp_session);
+}
+
+static int
+ccp_pmd_session_configure(struct rte_cryptodev *dev,
+			  struct rte_crypto_sym_xform *xform,
+			  struct rte_cryptodev_sym_session *sess,
+			  struct rte_mempool *mempool)
+{
+	int ret;
+	void *sess_private_data;
+
+	if (unlikely(sess == NULL || xform == NULL)) {
+		CCP_LOG_ERR("Invalid session struct or xform");
+		return -ENOMEM;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CCP_LOG_ERR("Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+	ret = ccp_set_session_parameters(sess_private_data, xform);
+	if (ret != 0) {
+		CCP_LOG_ERR("failed configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+	set_session_private_data(sess, dev->driver_id,
+				 sess_private_data);
+
+	return 0;
+}
+
+static void
+ccp_pmd_session_clear(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_session_private_data(sess, index);
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		rte_mempool_put(sess_mp, sess_priv);
+		memset(sess_priv, 0, sizeof(struct ccp_session));
+		set_session_private_data(sess, index, NULL);
+	}
+}
+
 struct rte_cryptodev_ops ccp_ops = {
 		.dev_configure		= ccp_pmd_config,
 		.dev_start		= ccp_pmd_start,
@@ -98,9 +153,9 @@ struct rte_cryptodev_ops ccp_ops = {
 		.queue_pair_stop	= NULL,
 		.queue_pair_count	= NULL,
 
-		.session_get_size	= NULL,
-		.session_configure	= NULL,
-		.session_clear		= NULL,
+		.session_get_size	= ccp_pmd_session_get_size,
+		.session_configure	= ccp_pmd_session_configure,
+		.session_clear		= ccp_pmd_session_clear,
 };
 
 struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
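
The three session ops plug into the generic session API. A hedged sketch of
how an application would exercise them once a cipher algo is wired in (at
this point in the series every algo still returns "unsupported"; key,
iv_offset, sess_mp and dev_id are assumed):

	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC, /* added later in the series */
			.key = { .data = key, .length = 16 },
			.iv = { .offset = iv_offset, .length = 16 },
		},
	};
	struct rte_cryptodev_sym_session *sess =
		rte_cryptodev_sym_session_create(sess_mp);

	/* drives ccp_pmd_session_configure() -> ccp_set_session_parameters() */
	rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_mp);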

* [PATCH v2 05/20] crypto/ccp: add queue pair related pmd ops support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (2 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 04/20] crypto/ccp: add session related crypto pmd ops support Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 06/20] crypto/ccp: add crypto enqueue and dequeue burst api support Ravi Kumar
                     ` (17 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_pmd_ops.c | 149 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 144 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index ad0a670..a02aa6f 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -82,6 +82,145 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
 	}
 }
 
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct ccp_qp *qp;
+
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+		rte_ring_free(qp->processed_pkts);
+		rte_mempool_free(qp->batch_mp);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct ccp_qp *qp)
+{
+	unsigned int n = snprintf(qp->name, sizeof(qp->name),
+			"ccp_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n > sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+				  unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->size >= ring_size) {
+			CCP_LOG_INFO(
+				"Reusing ring %s for processed packets",
+				qp->name);
+			return r;
+		}
+		CCP_LOG_INFO(
+			"Unable to reuse ring %s for processed packets",
+			 qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		 const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id, struct rte_mempool *session_pool)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+	struct ccp_qp *qp;
+	int retval = 0;
+
+	if (qp_id >= internals->max_nb_qpairs) {
+		CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+			    qp_id, internals->max_nb_qpairs);
+		return (-EINVAL);
+	}
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		ccp_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL) {
+		CCP_LOG_ERR("Failed to allocate queue pair memory");
+		return (-ENOMEM);
+	}
+
+	qp->dev = dev;
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	retval = ccp_pmd_qp_set_unique_name(dev, qp);
+	if (retval) {
+		CCP_LOG_ERR("Failed to create unique name for ccp qp");
+		goto qp_setup_cleanup;
+	}
+
+	qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL) {
+		CCP_LOG_ERR("Failed to create batch info ring");
+		goto qp_setup_cleanup;
+	}
+
+	qp->sess_mp = session_pool;
+
+	/* mempool for batch info */
+	qp->batch_mp = rte_mempool_create(
+				qp->name,
+				qp_conf->nb_descriptors,
+				sizeof(struct ccp_batch_info),
+				RTE_CACHE_LINE_SIZE,
+				0, NULL, NULL, NULL, NULL,
+				SOCKET_ID_ANY, 0);
+	if (qp->batch_mp == NULL)
+		goto qp_setup_cleanup;
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	return 0;
+
+qp_setup_cleanup:
+	dev->data->queue_pairs[qp_id] = NULL;
+	if (qp)
+		rte_free(qp);
+	return -1;
+}
+
+static int
+ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
+		 uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static int
+ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
+		uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
 static unsigned
 ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
 {
@@ -147,11 +286,11 @@ struct rte_cryptodev_ops ccp_ops = {
 
 		.dev_infos_get		= ccp_pmd_info_get,
 
-		.queue_pair_setup	= NULL,
-		.queue_pair_release	= NULL,
-		.queue_pair_start	= NULL,
-		.queue_pair_stop	= NULL,
-		.queue_pair_count	= NULL,
+		.queue_pair_setup	= ccp_pmd_qp_setup,
+		.queue_pair_release	= ccp_pmd_qp_release,
+		.queue_pair_start	= ccp_pmd_qp_start,
+		.queue_pair_stop	= ccp_pmd_qp_stop,
+		.queue_pair_count	= ccp_pmd_qp_count,
 
 		.session_get_size	= ccp_pmd_session_get_size,
 		.session_configure	= ccp_pmd_session_configure,
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
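
Queue-pair setup is driven from the application through the standard
configure/setup sequence; a minimal sketch, with field names as per the
cryptodev API of this release (dev_id and sess_mp assumed):

	struct rte_cryptodev_config conf = {
		.nb_queue_pairs = 1,
		.socket_id = rte_socket_id(),
	};
	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 1024 };

	rte_cryptodev_configure(dev_id, &conf);
	/* lands in ccp_pmd_qp_setup(): allocates the qp, its batch-info ring
	 * ("processed_pkts") and the batch-info mempool */
	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
				       rte_socket_id(), sess_mp);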

* [PATCH v2 06/20] crypto/ccp: add crypto enqueue and dequeue burst api support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (3 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 05/20] crypto/ccp: add queue pair related " Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 07/20] crypto/ccp: add support for RTE_CRYPTO_OP_SESSIONLESS Ravi Kumar
                     ` (16 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 359 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  35 ++++
 drivers/crypto/ccp/ccp_dev.c     |  27 +++
 drivers/crypto/ccp/ccp_dev.h     |   9 +
 drivers/crypto/ccp/rte_ccp_pmd.c |  64 ++++++-
 5 files changed, 487 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 80abcdc..c17e84f 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -228,3 +228,362 @@ ccp_set_session_parameters(struct ccp_session *sess,
 	return ret;
 }
 
+/* calculate CCP descriptors requirement */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cipher.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo %d",
+			    session->cipher.algo);
+	}
+	return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->auth.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported auth algo %d",
+			    session->auth.algo);
+	}
+
+	return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->aead_algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo %d",
+			    session->aead_algo);
+	}
+	return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cmd_id) {
+	case CCP_CMD_CIPHER:
+		count = ccp_cipher_slot(session);
+		break;
+	case CCP_CMD_AUTH:
+		count = ccp_auth_slot(session);
+		break;
+	case CCP_CMD_CIPHER_HASH:
+	case CCP_CMD_HASH_CIPHER:
+		count = ccp_cipher_slot(session);
+		count += ccp_auth_slot(session);
+		break;
+	case CCP_CMD_COMBINED:
+		count = ccp_aead_slot(session);
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+
+	}
+
+	return count;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+		  struct ccp_queue *cmd_q __rte_unused,
+		  struct ccp_batch_info *b_info __rte_unused)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+
+	switch (session->cipher.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo %d",
+			    session->cipher.algo);
+		return -ENOTSUP;
+	}
+	return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q __rte_unused,
+		struct ccp_batch_info *b_info __rte_unused)
+{
+
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	switch (session->auth.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported auth algo %d",
+			    session->auth.algo);
+		return -ENOTSUP;
+	}
+
+	return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q __rte_unused,
+		struct ccp_batch_info *b_info __rte_unused)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	switch (session->aead_algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo %d",
+			    session->aead_algo);
+		return -ENOTSUP;
+	}
+	return result;
+}
+
+int
+process_ops_to_enqueue(const struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       struct ccp_queue *cmd_q,
+		       uint16_t nb_ops,
+		       int slots_req)
+{
+	int i, result = 0;
+	struct ccp_batch_info *b_info;
+	struct ccp_session *session;
+
+	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+		CCP_LOG_ERR("batch info allocation failed");
+		return 0;
+	}
+	/* populate batch info necessary for dequeue */
+	b_info->op_idx = 0;
+	b_info->lsb_buf_idx = 0;
+	b_info->desccnt = 0;
+	b_info->cmd_q = cmd_q;
+	b_info->lsb_buf_phys =
+		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+	for (i = 0; i < nb_ops; i++) {
+		session = (struct ccp_session *)get_session_private_data(
+						 op[i]->sym->session,
+						 ccp_cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_AUTH:
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_CIPHER_HASH:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			if (result)
+				break;
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_HASH_CIPHER:
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			if (result)
+				break;
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_COMBINED:
+			result = ccp_crypto_aead(op[i], cmd_q, b_info);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+			result = -1;
+		}
+		if (unlikely(result < 0)) {
+			rte_atomic64_add(&b_info->cmd_q->free_slots,
+					 (slots_req - b_info->desccnt));
+			break;
+		}
+		b_info->op[i] = op[i];
+	}
+
+	b_info->opcnt = i;
+	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+
+	rte_wmb();
+	/* Write the new tail address back to the queue register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      b_info->tail_offset);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+
+	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+	return i;
+}
+
+static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+	struct ccp_session *session;
+	uint8_t *digest_data, *addr;
+	struct rte_mbuf *m_last;
+	int offset, digest_offset;
+	uint8_t digest_le[64];
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	if (session->cmd_id == CCP_CMD_COMBINED) {
+		digest_data = op->sym->aead.digest.data;
+		digest_offset = op->sym->aead.data.offset +
+					op->sym->aead.data.length;
+	} else {
+		digest_data = op->sym->auth.digest.data;
+		digest_offset = op->sym->auth.data.offset +
+					op->sym->auth.data.length;
+	}
+	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+			   m_last->data_len - session->auth.ctx_len);
+
+	rte_mb();
+	offset = session->auth.offset;
+
+	if (session->auth.engine == CCP_ENGINE_SHA)
+		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+			/* All other algorithms require byte
+			 * swap done by host
+			 */
+			unsigned int i;
+
+			offset = session->auth.ctx_len -
+				session->auth.offset - 1;
+			for (i = 0; i < session->auth.digest_length; i++)
+				digest_le[i] = addr[offset - i];
+			offset = 0;
+			addr = digest_le;
+		}
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(addr + offset, digest_data,
+			   session->auth.digest_length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+	} else {
+		if (unlikely(digest_data == NULL))
+			digest_data = rte_pktmbuf_mtod_offset(
+					op->sym->m_dst, uint8_t *,
+					digest_offset);
+		rte_memcpy(digest_data, addr + offset,
+			   session->auth.digest_length);
+	}
+	/* Trim area used for digest from mbuf. */
+	rte_pktmbuf_trim(op->sym->m_src,
+			 session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct rte_crypto_op **op_d,
+		struct ccp_batch_info *b_info,
+		uint16_t nb_ops)
+{
+	int i, min_ops;
+	struct ccp_session *session;
+
+	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+	for (i = 0; i < min_ops; i++) {
+		op_d[i] = b_info->op[b_info->op_idx++];
+		session = (struct ccp_session *)get_session_private_data(
+						 op_d[i]->sym->session,
+						ccp_cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			break;
+		case CCP_CMD_AUTH:
+		case CCP_CMD_CIPHER_HASH:
+		case CCP_CMD_HASH_CIPHER:
+		case CCP_CMD_COMBINED:
+			ccp_auth_dq_prepare(op_d[i]);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+		}
+	}
+
+	b_info->opcnt -= min_ops;
+	return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       uint16_t nb_ops)
+{
+	struct ccp_batch_info *b_info;
+	uint32_t cur_head_offset;
+
+	if (qp->b_info != NULL) {
+		b_info = qp->b_info;
+		if (unlikely(b_info->op_idx > 0))
+			goto success;
+	} else if (rte_ring_dequeue(qp->processed_pkts,
+				    (void **)&b_info))
+		return 0;
+	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+				       CMD_Q_HEAD_LO_BASE);
+
+	if (b_info->head_offset < b_info->tail_offset) {
+		if ((cur_head_offset >= b_info->head_offset) &&
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	} else {
+		if ((cur_head_offset >= b_info->head_offset) ||
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	}
+
+
+success:
+	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
+	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+	b_info->desccnt = 0;
+	if (b_info->opcnt > 0) {
+		qp->b_info = b_info;
+	} else {
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		qp->b_info = NULL;
+	}
+
+	return nb_ops;
+}
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 346d5ee..4455497 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -264,4 +264,39 @@ struct ccp_qp;
 int ccp_set_session_parameters(struct ccp_session *sess,
 			       const struct rte_crypto_sym_xform *xform);
 
+/**
+ * Compute the number of CCP descriptor slots a session requires
+ *
+ * @param session CCP private session
+ * @return count of descriptor slots required
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * process crypto ops to be enqueued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops No. of ops to be submitted
+ * @param slots_req number of descriptor slots required for the burst
+ * @return number of crypto ops enqueued
+ */
+int process_ops_to_enqueue(const struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   struct ccp_queue *cmd_q,
+			   uint16_t nb_ops,
+			   int slots_req);
+
+/**
+ * process crypto ops to be dequeued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops requested no. of ops
+ * @return number of crypto ops dequeued
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   uint16_t nb_ops);
+
 #endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 57bccf4..fee90e3 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -61,6 +61,33 @@ ccp_dev_start(struct rte_cryptodev *dev)
 	return 0;
 }
 
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+	int i, ret = 0;
+	struct ccp_device *dev;
+	struct ccp_private *priv = cdev->data->dev_private;
+
+	dev = TAILQ_NEXT(priv->last_dev, next);
+	if (unlikely(dev == NULL))
+		dev = TAILQ_FIRST(&ccp_list);
+	priv->last_dev = dev;
+	if (dev->qidx >= dev->cmd_q_count)
+		dev->qidx = 0;
+	ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+	if (ret >= slot_req)
+		return &dev->cmd_q[dev->qidx];
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->qidx++;
+		if (dev->qidx >= dev->cmd_q_count)
+			dev->qidx = 0;
+		ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+		if (ret >= slot_req)
+			return &dev->cmd_q[dev->qidx];
+	}
+	return NULL;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index a16ba81..cfb3b03 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -445,4 +445,13 @@ int ccp_dev_start(struct rte_cryptodev *dev);
  */
 int ccp_probe_devices(const struct rte_pci_id *ccp_id);
 
+/**
+ * allocate a ccp command queue
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of required descriptor slots
+ * @return allotted CCP queue on success otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
 #endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 1fdbe1f..fb6d41e 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -38,6 +38,7 @@
 #include <rte_dev.h>
 #include <rte_malloc.h>
 
+#include "ccp_crypto.h"
 #include "ccp_dev.h"
 #include "ccp_pmd_private.h"
 
@@ -47,23 +48,72 @@
 static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
+{
+	struct ccp_session *sess = NULL;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (unlikely(op->sym->session == NULL))
+			return NULL;
+
+		sess = (struct ccp_session *)
+			get_session_private_data(
+				op->sym->session,
+				ccp_cryptodev_driver_id);
+	}
+
+	return sess;
+}
+
 static uint16_t
-ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
-		      struct rte_crypto_op **ops __rte_unused,
-		      uint16_t nb_ops __rte_unused)
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		      uint16_t nb_ops)
 {
-	uint16_t enq_cnt = 0;
+	struct ccp_session *sess = NULL;
+	struct ccp_qp *qp = queue_pair;
+	struct ccp_queue *cmd_q;
+	struct rte_cryptodev *dev = qp->dev;
+	uint16_t i, enq_cnt = 0, slots_req = 0;
+
+	if (nb_ops == 0)
+		return 0;
+
+	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+		return 0;
+
+	for (i = 0; i < nb_ops; i++) {
+		sess = get_ccp_session(qp, ops[i]);
+		if (unlikely(sess == NULL) && (i == 0)) {
+			qp->qp_stats.enqueue_err_count++;
+			return 0;
+		} else if (sess == NULL) {
+			nb_ops = i;
+			break;
+		}
+		slots_req += ccp_compute_slot_count(sess);
+	}
+
+	cmd_q = ccp_allot_queue(dev, slots_req);
+	if (unlikely(cmd_q == NULL))
+		return 0;
 
+	enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+	qp->qp_stats.enqueued_count += enq_cnt;
 	return enq_cnt;
 }
 
 static uint16_t
-ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
-		      struct rte_crypto_op **ops __rte_unused,
-		      uint16_t nb_ops __rte_unused)
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
+	struct ccp_qp *qp = queue_pair;
 	uint16_t nb_dequeued = 0;
 
+	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
 	return nb_dequeued;
 }
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
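
With the burst hooks in place, the data path follows the usual cryptodev
pattern; a minimal sketch of the polling loop (ops assumed prepared against
a valid session):

	uint16_t sent, done = 0;

	/* batches ops onto a CCP queue via process_ops_to_enqueue() */
	sent = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);

	/* completion is detected by comparing the queue head offset saved in
	 * the batch info, so dequeue must be polled until the batch drains */
	while (done < sent)
		done += rte_cryptodev_dequeue_burst(dev_id, 0,
						    &ops[done], sent - done);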

* [PATCH v2 07/20] crypto/ccp: add support for RTE_CRYPTO_OP_SESSIONLESS
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (4 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 06/20] crypto/ccp: add crypto enqueue and dequeue burst api support Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 08/20] crypto/ccp: add stats related crypto pmd ops support Ravi Kumar
                     ` (15 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/rte_ccp_pmd.c | 33 +++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index fb6d41e..011240b 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -49,7 +49,7 @@ static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
 static struct ccp_session *
-get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
 {
 	struct ccp_session *sess = NULL;
 
@@ -61,6 +61,27 @@ get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
 			get_session_private_data(
 				op->sym->session,
 				ccp_cryptodev_driver_id);
+	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		void *_sess;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, &_sess))
+			return NULL;
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+			return NULL;
+
+		sess = (struct ccp_session *)_sess_private_data;
+
+		if (unlikely(ccp_set_session_parameters(sess,
+							op->sym->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
+			sess = NULL;
+		}
+		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_session_private_data(op->sym->session,
+					 ccp_cryptodev_driver_id,
+					 _sess_private_data);
 	}
 
 	return sess;
@@ -108,10 +129,18 @@ ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	struct ccp_qp *qp = queue_pair;
-	uint16_t nb_dequeued = 0;
+	uint16_t nb_dequeued = 0, i;
 
 	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
 
+	/* Free session if a session-less crypto op */
+	for (i = 0; i < nb_dequeued; i++)
+		if (unlikely(ops[i]->sess_type ==
+			     RTE_CRYPTO_OP_SESSIONLESS)) {
+			rte_mempool_put(qp->sess_mp,
+					ops[i]->sym->session);
+			ops[i]->sym->session = NULL;
+		}
 	qp->qp_stats.dequeued_count += nb_dequeued;
 
 	return nb_dequeued;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
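
A session-less op simply carries its xform chain instead of a session; a
minimal sketch (op_mp, xform and mbuf assumed):

	struct rte_crypto_op *op =
		rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC);

	/* sess_type is RTE_CRYPTO_OP_SESSIONLESS until a session is attached;
	 * get_ccp_session() then builds a temporary session from the xform
	 * and the dequeue path returns it to the mempool */
	op->sym->xform = &xform;
	op->sym->m_src = mbuf;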

* [PATCH v2 08/20] crypto/ccp: add stats related crypto pmd ops support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (5 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 07/20] crypto/ccp: add support for RTE_CRYPTO_OP_SESSIONLESS Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 09/20] crypto/ccp: add ccp hwrng feature support Ravi Kumar
                     ` (14 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_pmd_ops.c | 34 ++++++++++++++++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index a02aa6f..d483a74 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -68,6 +68,36 @@ ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
 }
 
 static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+		  struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+static void
 ccp_pmd_info_get(struct rte_cryptodev *dev,
 		 struct rte_cryptodev_info *dev_info)
 {
@@ -281,8 +311,8 @@ struct rte_cryptodev_ops ccp_ops = {
 		.dev_stop		= ccp_pmd_stop,
 		.dev_close		= ccp_pmd_close,
 
-		.stats_get		= NULL,
-		.stats_reset		= NULL,
+		.stats_get		= ccp_pmd_stats_get,
+		.stats_reset		= ccp_pmd_stats_reset,
 
 		.dev_infos_get		= ccp_pmd_info_get,
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
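
The two hooks back the generic stats API; a minimal usage sketch (needs
<inttypes.h> for the PRIu64 format macros):

	struct rte_cryptodev_stats stats;

	rte_cryptodev_stats_get(dev_id, &stats); /* sums the per-qp counters */
	printf("enq %" PRIu64 " deq %" PRIu64 " enq_err %" PRIu64 "\n",
	       stats.enqueued_count, stats.dequeued_count,
	       stats.enqueue_err_count);
	rte_cryptodev_stats_reset(dev_id);       /* zeroes every qp_stats */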

* [PATCH v2 09/20] crypto/ccp: add ccp hwrng feature support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (6 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 08/20] crypto/ccp: add stats related crypto pmd ops support Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 10/20] crypto/ccp: add aes cipher algo support Ravi Kumar
                     ` (13 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_dev.c | 20 ++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h | 11 +++++++++++
 2 files changed, 31 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index fee90e3..d8c0ab4 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -88,6 +88,26 @@ ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
 	return NULL;
 }
 
+int
+ccp_read_hwrng(uint32_t *value)
+{
+	struct ccp_device *dev;
+
+	TAILQ_FOREACH(dev, &ccp_list, next) {
+		void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+		while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+			*value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+			if (*value) {
+				dev->hwrng_retries = 0;
+				return 0;
+			}
+		}
+		dev->hwrng_retries = 0;
+	}
+	return -1;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index cfb3b03..a5c9ef3 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -47,6 +47,7 @@
 
 /**< CCP specific */
 #define MAX_HW_QUEUES                   5
+#define CCP_MAX_TRNG_RETRIES		10
 
 /**< CCP Register Mappings */
 #define Q_MASK_REG                      0x000
@@ -223,6 +224,8 @@ struct ccp_device {
 	/**< protection for shared lsb region allocation */
 	int qidx;
 	/**< current queue index */
+	int hwrng_retries;
+	/**< retry counter for CCP TRNG */
 } __rte_cache_aligned;
 
 /**< CCP H/W engine related */
@@ -454,4 +457,12 @@ int ccp_probe_devices(const struct rte_pci_id *ccp_id);
  */
 struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
 
+/**
+ * Read a 32-bit value from the CCP hardware RNG
+ *
+ * @param trng_value pointer where the RNG value is written
+ * @return 0 on success, -1 otherwise
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
 #endif /* _CCP_DEV_H_ */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
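
ccp_read_hwrng() retries each probed device's TRNG output register up to
CCP_MAX_TRNG_RETRIES times before moving to the next device, returning -1
only when every device keeps reading back zero. A short sketch of a caller,
assuming probing has already run; fill_random() is a hypothetical helper,
not part of the patch:

#include <stdint.h>

#include "ccp_dev.h"

/* Collect n 32-bit words from the CCP TRNG; give up if the generator
 * is exhausted on all probed devices (ccp_read_hwrng() returns -1). */
static int
fill_random(uint32_t *buf, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (ccp_read_hwrng(&buf[i]) < 0)
			return -1;
	return 0;
}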

* [PATCH v2 10/20] crypto/ccp: add aes cipher algo support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (7 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 09/20] crypto/ccp: add ccp hwrng feature support Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 11/20] crypto/ccp: add 3des " Ravi Kumar
                     ` (12 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 197 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h  |  13 +++
 drivers/crypto/ccp/ccp_dev.h     |  53 +++++++++++
 drivers/crypto/ccp/ccp_pmd_ops.c |  60 ++++++++++++
 4 files changed, 321 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index c17e84f..b097355 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -80,6 +80,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			     const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	size_t i;
 
 	cipher_xform = &xform->cipher;
 
@@ -99,6 +100,21 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 	sess->iv.length = cipher_xform->iv.length;
 
 	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo");
 		return -1;
@@ -106,10 +122,27 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 
 
 	switch (sess->cipher.engine) {
+	case CCP_ENGINE_AES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (i = 0; i < sess->cipher.key_length ; i++)
+			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+				sess->cipher.key[i];
+		break;
 	default:
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
 	}
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
 	return 0;
 }
 
@@ -235,6 +268,18 @@ ccp_cipher_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		count = 1;
+		/**< only op */
+		break;
+	case CCP_CIPHER_ALGO_AES_CTR:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
@@ -297,10 +342,146 @@ ccp_compute_slot_count(struct ccp_session *session)
 	return count;
 }
 
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_desc *desc;
+	union ccp_function function;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 0;
+	CCP_CMD_EOM(desc) = 0;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+	CCP_PT_BITWISE(&function) = pst->bit_mod;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = pst->len;
+
+	if (pst->dir) {
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = 0;
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+	} else {
+
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = 0;
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+	}
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
+
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *lsb_buf;
+	struct ccp_passthru pst = {0};
+	struct ccp_desc *desc;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	uint8_t *iv;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+	function.raw = 0;
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
+		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
+				   iv, session->iv.length);
+			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+			CCP_AES_SIZE(&function) = 0x1F;
+		} else {
+			lsb_buf =
+			&(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+			rte_memcpy(lsb_buf +
+				   (CCP_SB_BYTES - session->iv.length),
+				   iv, session->iv.length);
+			pst.src_addr = b_info->lsb_buf_phys +
+				(b_info->lsb_buf_idx * CCP_SB_BYTES);
+			b_info->lsb_buf_idx++;
+		}
+
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+	key_addr = session->cipher.key_phys;
+
+	/* prepare desc for aes command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
-		  struct ccp_queue *cmd_q __rte_unused,
-		  struct ccp_batch_info *b_info __rte_unused)
+		  struct ccp_queue *cmd_q,
+		  struct ccp_batch_info *b_info)
 {
 	int result = 0;
 	struct ccp_session *session;
@@ -310,6 +491,18 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 					 ccp_cryptodev_driver_id);
 
 	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	case CCP_CIPHER_ALGO_AES_CTR:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 1;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 4455497..614cd47 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -46,7 +46,20 @@
 
 #include "ccp_dev.h"
 
+#define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
 #define CCP_SHA3_CTX_SIZE 200
+
+/**Macro helpers for CCP command creation*/
+#define	CCP_AES_SIZE(p)		((p)->aes.size)
+#define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
+#define	CCP_AES_MODE(p)		((p)->aes.mode)
+#define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
+#define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index a5c9ef3..759afc1 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -48,6 +48,7 @@
 /**< CCP specific */
 #define MAX_HW_QUEUES                   5
 #define CCP_MAX_TRNG_RETRIES		10
+#define CCP_ALIGN(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
 
 /**< CCP Register Mappings */
 #define Q_MASK_REG                      0x000
@@ -104,10 +105,52 @@
 #define LSB_SIZE                        16
 #define LSB_ITEM_SIZE                   32
 #define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR)      (LSB_ADDR / LSB_ITEM_SIZE)
 
 /* General CCP Defines */
 
 #define CCP_SB_BYTES                    32
+/* Word 0 */
+#define CCP_CMD_DW0(p)		((p)->dw0)
+#define CCP_CMD_SOC(p)		(CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p)		(CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p)	        (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p)		(CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p)	(CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p)	(CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p)	        (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p)		((p)->length)
+#define CCP_CMD_LEN(p)		(CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p)		((p)->src_lo)
+#define CCP_CMD_SRC_LO(p)	(CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p)		((p)->dw3)
+#define CCP_CMD_SRC_MEM(p)	((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p)	((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p)	((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p)	((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p)		((p)->dw4)
+#define CCP_CMD_DST_LO(p)	(CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p)		((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p)	(CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p)	((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p)	((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p)	((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p)	((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p)		((p)->key_lo)
+#define CCP_CMD_KEY_LO(p)	(CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p)		((p)->dw7)
+#define CCP_CMD_KEY_HI(p)	((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p)	((p)->dw7.key_mem)
 
 /* bitmap */
 enum {
@@ -412,6 +455,16 @@ struct ccp_desc {
 };
 
 /**
+ * ccp memory type
+ */
+enum ccp_memtype {
+	CCP_MEMTYPE_SYSTEM = 0,
+	CCP_MEMTYPE_SB,
+	CCP_MEMTYPE_LOCAL,
+	CCP_MEMTYPE_LAST,
+};
+
+/**
  * cmd id to follow order
  */
 enum ccp_cmd_order {
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index d483a74..5f56242 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,66 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{       /* AES ECB */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_ECB,
+				.block_size = 16,
+				.key_size = {
+				   .min = 16,
+				   .max = 32,
+				   .increment = 8
+				},
+				.iv_size = {
+				   .min = 0,
+				   .max = 0,
+				   .increment = 0
+				}
+			}, }
+		}, }
+	},
+	{       /* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
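
For context, the new AES cases are selected by the cipher xform attached to
a session. A sketch of an xform that would exercise the AES-CBC path, under
the usual cryptodev convention of carrying the IV in the op's private area;
build_aes_cbc_xform() and the all-zero key are illustrative only:

#include <string.h>

#include <rte_crypto_sym.h>

static uint8_t cipher_key[16]; /* placeholder 128-bit key */

/* A 16-byte key makes ccp_configure_session_cipher() pick
 * CCP_AES_TYPE_128; the 16-byte IV is read from iv.offset per op. */
static void
build_aes_cbc_xform(struct rte_crypto_sym_xform *xform)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	xform->cipher.key.data = cipher_key;
	xform->cipher.key.length = sizeof(cipher_key);
	xform->cipher.iv.offset = sizeof(struct rte_crypto_op) +
				  sizeof(struct rte_crypto_sym_op);
	xform->cipher.iv.length = 16;
}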

* [PATCH v2 11/20] crypto/ccp: add 3des cipher algo support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (8 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 10/20] crypto/ccp: add aes cipher algo support Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 12/20] crypto/ccp: add aes-cmac auth algo support Ravi Kumar
                     ` (11 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 132 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h  |   3 +
 drivers/crypto/ccp/ccp_pmd_ops.c |  20 ++++++
 3 files changed, 154 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index b097355..0660761 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -80,7 +80,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			     const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
-	size_t i;
+	size_t i, j, x;
 
 	cipher_xform = &xform->cipher;
 
@@ -115,6 +115,11 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
 		sess->cipher.engine = CCP_ENGINE_AES;
 		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+		sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_3DES;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo");
 		return -1;
@@ -137,6 +142,20 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
 				sess->cipher.key[i];
 		break;
+	case CCP_ENGINE_3DES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+			for (i = 0; i < 8; i++)
+				sess->cipher.key_ccp[(8 + x) - i - 1] =
+					sess->cipher.key[i + x];
+		break;
 	default:
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
@@ -280,6 +299,10 @@ ccp_cipher_slot(struct ccp_session *session)
 		count = 2;
 		/**< op + passthrough for iv */
 		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
@@ -478,6 +501,109 @@ ccp_perform_aes(struct rte_crypto_op *op,
 	return 0;
 }
 
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	unsigned char *lsb_buf;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *iv;
+	phys_addr_t src_addr, dest_addr, key_addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	switch (session->cipher.um.des_mode) {
+	case CCP_DES_MODE_CBC:
+		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+		b_info->lsb_buf_idx++;
+
+		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+			   iv, session->iv.length);
+
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+		break;
+	case CCP_DES_MODE_CFB:
+	case CCP_DES_MODE_ECB:
+		CCP_LOG_ERR("Unsupported DES cipher mode");
+		return -ENOTSUP;
+	}
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr =
+			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						   op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+
+	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for des command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.des_mode)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	/* Write the new tail address back to the queue register */
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
 		  struct ccp_queue *cmd_q,
@@ -503,6 +629,10 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 		result = ccp_perform_aes(op, cmd_q, b_info);
 		b_info->desccnt += 1;
 		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		result = ccp_perform_3des(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 614cd47..d528ec9 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -57,6 +57,9 @@
 #define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
 #define	CCP_AES_MODE(p)		((p)->aes.mode)
 #define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_DES_ENCRYPT(p)	((p)->des.encrypt)
+#define	CCP_DES_MODE(p)		((p)->des.mode)
+#define	CCP_DES_TYPE(p)		((p)->des.type)
 #define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
 #define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
 
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 5f56242..3a16be8 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -99,6 +99,26 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 24,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
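
The 3DES key setup above byte-reverses each 8-byte DES subkey independently
while keeping the subkeys in order, since the CCP expects the key words
swapped. A standalone restatement of that loop (ccp_3des_key_swap() is an
illustrative helper, not part of the patch):

#include <stddef.h>
#include <stdint.h>

/* Equivalent of the CCP_ENGINE_3DES key shuffle in
 * ccp_configure_session_cipher(): reverse the bytes of each 8-byte
 * DES subkey, leaving subkey order unchanged. */
static void
ccp_3des_key_swap(uint8_t *dst, const uint8_t *src, size_t key_len)
{
	size_t i, j;

	for (j = 0; j < key_len / 8; j++)
		for (i = 0; i < 8; i++)
			dst[j * 8 + (7 - i)] = src[j * 8 + i];
}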

* [PATCH v2 12/20] crypto/ccp: add aes-cmac auth algo support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (9 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 11/20] crypto/ccp: add 3des " Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-08 17:27     ` De Lara Guarch, Pablo
  2018-01-05  9:39   ` [PATCH v2 13/20] crypto/ccp: add aes-gcm aead algo support Ravi Kumar
                     ` (10 subsequent siblings)
  21 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 277 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_pmd_ops.c |  21 +++
 2 files changed, 296 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 0660761..6e593d8 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -36,6 +36,8 @@
 #include <sys/queue.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <openssl/cmac.h> /*sub key apis*/
+#include <openssl/evp.h> /*sub key apis*/
 
 #include <rte_hexdump.h>
 #include <rte_memzone.h>
@@ -74,6 +76,84 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 	return res;
 }
 
+/* prepare temporary keys K1 and K2 */
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+	int i;
+	/* Shift block to left, including carry */
+	for (i = 0; i < bl; i++) {
+		k[i] = l[i] << 1;
+		if (i < bl - 1 && l[i + 1] & 0x80)
+			k[i] |= 1;
+	}
+	/* If MSB set fixup with R */
+	if (l[0] & 0x80)
+		k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/* subkeys K1 and K2 generation for CMAC */
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+	const EVP_CIPHER *algo;
+	EVP_CIPHER_CTX *ctx;
+	unsigned char *ccp_ctx;
+	size_t i;
+	int dstlen, totlen;
+	unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+	unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+	unsigned char k1[AES_BLOCK_SIZE] = {0};
+	unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+	if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+		algo =  EVP_aes_128_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+		algo =  EVP_aes_192_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+		algo =  EVP_aes_256_cbc();
+	else {
+		CCP_LOG_ERR("Invalid CMAC type length");
+		return -1;
+	}
+
+	ctx = EVP_CIPHER_CTX_new();
+	if (!ctx) {
+		CCP_LOG_ERR("ctx creation failed");
+		return -1;
+	}
+	if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+			    (unsigned char *)zero_iv) <= 0)
+		goto key_generate_err;
+	if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+			      AES_BLOCK_SIZE) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+		goto key_generate_err;
+
+	memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+	prepare_key(k1, dst, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k1[i];
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+				   (2 * CCP_SB_BYTES) - 1);
+	prepare_key(k2, k1, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k2[i];
+
+	EVP_CIPHER_CTX_free(ctx);
+
+	return 0;
+
+key_generate_err:
+	CCP_LOG_ERR("CMAC Init failed");
+	return -1;
+}
+
 /* configure session */
 static int
 ccp_configure_session_cipher(struct ccp_session *sess,
@@ -170,6 +250,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_auth_xform *auth_xform = NULL;
+	size_t i;
 
 	auth_xform = &xform->auth;
 
@@ -179,6 +260,33 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+		sess->auth.key_length = auth_xform->key.length;
+		/**< padding and hash result */
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = AES_BLOCK_SIZE;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		if (sess->auth.key_length == 16)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->auth.key_length == 24)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->auth.key_length == 32)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid CMAC key length");
+			return -1;
+		}
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   sess->auth.key_length);
+		for (i = 0; i < sess->auth.key_length; i++)
+			sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+				sess->auth.key[i];
+		if (generate_cmac_subkeys(sess))
+			return -1;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported hash algo");
 		return -ENOTSUP;
@@ -316,6 +424,15 @@ ccp_auth_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_CMAC:
+		count = 4;
+		/**
+		 * op
+		 * extra descriptor in padding case
+		 * (k1/k2(255:128) with iv(127:0))
+		 * Retrieve result
+		 */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported auth algo %d",
 			    session->auth.algo);
@@ -415,6 +532,158 @@ ccp_perform_passthru(struct ccp_passthru *pst,
 }
 
 static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *src_tb, *append_ptr, *ctx_addr;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	int length, non_align_len;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+	CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
+	if (op->sym->auth.data.length % session->auth.block_size == 0) {
+
+		ctx_addr = session->auth.pre_compute;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		/* prepare desc for aes-cmac command */
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	} else {
+		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+		length *= AES_BLOCK_SIZE;
+		non_align_len = op->sym->auth.data.length - length;
+		/* prepare desc for aes-cmac command */
+		/*Command 1*/
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_INIT(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		/*Command 2*/
+		append_ptr = append_ptr + CCP_SB_BYTES;
+		memset(append_ptr, 0, AES_BLOCK_SIZE);
+		src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+						 uint8_t *,
+						 op->sym->auth.data.offset +
+						 length);
+		rte_memcpy(append_ptr, src_tb, non_align_len);
+		append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+		CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+		CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	}
+	/* Retrieve result */
+	pst.dest_addr = dest_addr;
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
 ccp_perform_aes(struct rte_crypto_op *op,
 		struct ccp_queue *cmd_q,
 		struct ccp_batch_info *b_info)
@@ -643,8 +912,8 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 
 static inline int
 ccp_crypto_auth(struct rte_crypto_op *op,
-		struct ccp_queue *cmd_q __rte_unused,
-		struct ccp_batch_info *b_info __rte_unused)
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
 {
 
 	int result = 0;
@@ -655,6 +924,10 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 					ccp_cryptodev_driver_id);
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_CMAC:
+		result = ccp_perform_aes_cmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported auth algo %d",
 			    session->auth.algo);
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 3a16be8..59e7285 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,27 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{	/*AES-CMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
 	{       /* AES ECB */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
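
prepare_key() above is the standard CMAC subkey doubling in GF(2^128) from
RFC 4493: shift the block left by one bit and, if the bit shifted out was
set, XOR the reduction constant into the last byte (0x87 for 16-byte blocks,
0x1b for 8-byte ones). K1 is the doubling of E_K(0^128) and K2 the doubling
of K1. A self-contained restatement for the AES block size (cmac_double() is
an illustrative name):

#include <stdint.h>

#define CMAC_BLK 16 /* AES block size */

/* GF(2^128) doubling per RFC 4493: out = in << 1, reduced by 0x87
 * when the dropped most-significant bit was set. */
static void
cmac_double(uint8_t out[CMAC_BLK], const uint8_t in[CMAC_BLK])
{
	int i;

	for (i = 0; i < CMAC_BLK; i++) {
		out[i] = in[i] << 1;
		if (i < CMAC_BLK - 1 && (in[i + 1] & 0x80))
			out[i] |= 1;
	}
	if (in[0] & 0x80)
		out[CMAC_BLK - 1] ^= 0x87;
}
/* K1 = cmac_double(E_K(zero block)); K2 = cmac_double(K1). */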

* [PATCH v2 13/20] crypto/ccp: add aes-gcm aead algo support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (10 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 12/20] crypto/ccp: add aes-cmac auth algo support Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 14/20] crypto/ccp: add sha1 auth " Ravi Kumar
                     ` (9 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 240 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_pmd_ops.c |  30 +++++
 2 files changed, 266 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 6e593d8..4ced193 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -299,6 +299,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
 			   const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_aead_xform *aead_xform = NULL;
+	size_t i;
 
 	aead_xform = &xform->aead;
 
@@ -313,6 +314,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
 		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	}
+	sess->aead_algo = aead_xform->algo;
 	sess->auth.aad_length = aead_xform->aad_length;
 	sess->auth.digest_length = aead_xform->digest_length;
 
@@ -321,10 +323,37 @@ ccp_configure_session_aead(struct ccp_session *sess,
 	sess->iv.length = aead_xform->iv.length;
 
 	switch (aead_xform->algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid aead key length");
+			return -1;
+		}
+		for (i = 0; i < sess->cipher.key_length; i++)
+			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+				sess->cipher.key[i];
+		sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = 0;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		sess->cmd_id = CCP_CMD_COMBINED;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo");
 		return -ENOTSUP;
 	}
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
 	return 0;
 }
 
@@ -447,10 +476,27 @@ ccp_aead_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->aead_algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo %d",
 			    session->aead_algo);
 	}
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		count = 5;
+		/**
+		 * 1. Passthru iv
+		 * 2. Hash AAD
+		 * 3. GCTR
+		 * 4. Reload passthru
+		 * 5. Hash Final tag
+		 */
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+			    session->auth.algo);
+	}
 	return count;
 }
 
@@ -873,6 +919,184 @@ ccp_perform_3des(struct rte_crypto_op *op,
 	return 0;
 }
 
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *lsb_buf, *append_ptr, *iv;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint64_t *temp;
+	phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+	phys_addr_t digest_dest_addr;
+	int length, non_align_len, i;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						   session->auth.ctx_len);
+	digest_dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	append_ptr += AES_BLOCK_SIZE;
+	temp = (uint64_t *)append_ptr;
+	*temp++ =  rte_bswap64(session->auth.aad_length << 3);
+	*temp =  rte_bswap64(op->sym->cipher.data.length << 3);
+
+	non_align_len = op->sym->cipher.data.length % AES_BLOCK_SIZE;
+	length = CCP_ALIGN(op->sym->cipher.data.length, AES_BLOCK_SIZE);
+
+	aad_addr = rte_mem_virt2phy((void *)op->sym->aead.aad.data);
+
+	/* CMD1 IV Passthru */
+	for (i = 0;  i < CTR_IV_SIZE; i++)
+		session->cipher.nonce[CTR_NONCE_SIZE + CTR_IV_SIZE - 1 - i] =
+			iv[i];
+	lsb_buf = session->cipher.nonce;
+
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD2 GHASH-AAD */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD3 : GCTR Plain text */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	if (non_align_len == 0)
+		CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+	else
+		CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD4 : PT to copy IV */
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = AES_BLOCK_SIZE;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD5 : GHASH-Final */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	/* Last block (AAD_len || PT_len)*/
+	CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
+	CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
 		  struct ccp_queue *cmd_q,
@@ -939,17 +1163,25 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 
 static inline int
 ccp_crypto_aead(struct rte_crypto_op *op,
-		struct ccp_queue *cmd_q __rte_unused,
-		struct ccp_batch_info *b_info __rte_unused)
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
 {
 	int result = 0;
 	struct ccp_session *session;
 
 	session = (struct ccp_session *)get_session_private_data(
-					 op->sym->session,
+					op->sym->session,
 					ccp_cryptodev_driver_id);
 
-	switch (session->aead_algo) {
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+			CCP_LOG_ERR("Incorrect chain order");
+			return -1;
+		}
+		result = ccp_perform_aes_gcm(op, cmd_q);
+		b_info->desccnt += 5;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo %d",
 			    session->aead_algo);
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 59e7285..2309e9f 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -140,6 +140,36 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{       /* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				 .algo = RTE_CRYPTO_AEAD_AES_GCM,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = {
+					 .min = 0,
+					 .max = 65535,
+					 .increment = 1
+				 },
+				 .iv_size = {
+					 .min = 12,
+					 .max = 16,
+					 .increment = 4
+				 },
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
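
The five-descriptor chain mirrors GCM as specified in NIST SP 800-38D: GHASH
over the AAD, GCTR over the payload, then a final GHASH over the 16-byte
length block (AAD bits || payload bits) that the code assembles with the two
rte_bswap64() stores. On the application side, a sketch of the AEAD xform
that reaches this path; build_aes_gcm_xform() and the zero key are
illustrative only:

#include <string.h>

#include <rte_crypto_sym.h>

static uint8_t gcm_key[16]; /* placeholder 128-bit key */

/* A 12-byte IV is the customary GCM nonce; the PMD advertises a full
 * 16-byte tag and AAD sizes up to 65535 bytes. */
static void
build_aes_gcm_xform(struct rte_crypto_sym_xform *xform)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.key.data = gcm_key;
	xform->aead.key.length = sizeof(gcm_key);
	xform->aead.iv.offset = sizeof(struct rte_crypto_op) +
				sizeof(struct rte_crypto_sym_op);
	xform->aead.iv.length = 12;
	xform->aead.aad_length = 16;    /* example AAD size */
	xform->aead.digest_length = 16; /* full GCM tag */
}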

* [PATCH v2 14/20] crypto/ccp: add sha1 auth algo support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (11 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 13/20] crypto/ccp: add aes-gcm aead algo support Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 15/20] crypto/ccp: add sha2 family " Ravi Kumar
                     ` (8 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 367 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  23 +++
 drivers/crypto/ccp/ccp_pmd_ops.c |  42 +++++
 3 files changed, 432 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 4ced193..a137405 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -36,6 +36,7 @@
 #include <sys/queue.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <openssl/sha.h>
 #include <openssl/cmac.h> /*sub key apis*/
 #include <openssl/evp.h> /*sub key apis*/
 
@@ -52,6 +53,14 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA1_H4, SHA1_H3,
+	SHA1_H2, SHA1_H1,
+	SHA1_H0, 0x0U,
+	0x0U, 0x0U,
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -76,6 +85,59 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 	return res;
 }
 
+/* partial hash using openssl */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA_CTX ctx;
+
+	if (!SHA1_Init(&ctx))
+		return -EFAULT;
+	SHA1_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+	return 0;
+}
+
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+	uint8_t ipad[sess->auth.block_size];
+	uint8_t	opad[sess->auth.block_size];
+	uint8_t *ipad_t, *opad_t;
+	uint32_t *hash_value_be32, hash_temp32[8];
+	int i, count;
+
+	opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+	hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+
+	/* considering key size is always equal to block size of algorithm */
+	for (i = 0; i < sess->auth.block_size; i++) {
+		ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+		opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+	}
+
+	switch (sess->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		count = SHA1_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	default:
+		CCP_LOG_ERR("Invalid auth algo");
+		return -1;
+	}
+}
+
 /* prepare temporary keys K1 and K2 */
 static void prepare_key(unsigned char *k, unsigned char *l, int bl)
 {
@@ -260,6 +322,31 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1:
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx = (void *)ccp_sha1_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -453,6 +540,13 @@ ccp_auth_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+		count = 3;
+		/**< op + lsb passthrough cpy to/from*/
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		count = 6;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -578,6 +672,271 @@ ccp_perform_passthru(struct ccp_passthru *pst,
 }
 
 static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, dest_addr_t;
+	struct ccp_passthru pst;
+	uint64_t auth_msg_bits;
+	void *append_ptr;
+	uint8_t *addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+	addr = session->auth.pre_compute;
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	dest_addr_t = dest_addr;
+
+	/** Load PHash1 to LSB*/
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/**sha engine command descriptor for IntermediateHash*/
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+	auth_msg_bits = (op->sym->auth.data.length +
+			 session->auth.block_size)  * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Intermediate Hash value retrieve */
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = session->auth.ctx_len;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	}
+
+	/** Load PHash2 to LSB*/
+	addr += session->auth.ctx_len;
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/**sha engine command descriptor for FinalHash*/
+	dest_addr_t += session->auth.offset;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+			     session->auth.offset);
+	auth_msg_bits = (session->auth.block_size +
+			 session->auth.ctx_len -
+			 session->auth.offset) * 8;
+
+	CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Retrieve hmac output */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+
+}
+
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr;
+	struct ccp_passthru pst;
+	void *append_ptr;
+	uint64_t auth_msg_bits;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+
+	/* Passthru SHA context */
+
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+						     session->auth.ctx);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* Prepare SHA command descriptor */
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+	auth_msg_bits = op->sym->auth.data.length * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Hash value retrieve */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+
+}
+
+static int
 ccp_perform_aes_cmac(struct rte_crypto_op *op,
 		     struct ccp_queue *cmd_q)
 {
@@ -1148,6 +1507,14 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 					ccp_cryptodev_driver_id);
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+		result = ccp_perform_sha(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 6;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index d528ec9..42179de 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -60,9 +60,32 @@
 #define	CCP_DES_ENCRYPT(p)	((p)->des.encrypt)
 #define	CCP_DES_MODE(p)		((p)->des.mode)
 #define	CCP_DES_TYPE(p)		((p)->des.type)
+#define	CCP_SHA_TYPE(p)		((p)->sha.type)
 #define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
 #define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
 
+/* HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#define MD5_DIGEST_SIZE         16
+#define MD5_BLOCK_SIZE          64
+#endif
+
+/* SHA */
+#define SHA_COMMON_DIGEST_SIZE	32
+#define SHA1_DIGEST_SIZE        20
+#define SHA1_BLOCK_SIZE         64
+
+/* SHA LSB initialization values */
+
+#define SHA1_H0		0x67452301UL
+#define SHA1_H1		0xefcdab89UL
+#define SHA1_H2		0x98badcfeUL
+#define SHA1_H3		0x10325476UL
+#define SHA1_H4		0xc3d2e1f0UL
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 2309e9f..19d0491 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,48 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{	/* SHA1 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
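
The descriptor flow in ccp_perform_hmac() implements the standard
two-stage HMAC decomposition, HMAC(k, m) = H((k ^ opad) || H((k ^ ipad)
|| m)), with the two key-derived partial hashes precomputed per session
and staged through the LSB. A minimal software reference of the same
decomposition for SHA-1, using OpenSSL (the hmac_sha1_ref() helper name
is illustrative, not part of the patch):

    #include <stdint.h>
    #include <string.h>
    #include <openssl/sha.h>

    #define SHA1_BLOCK 64

    /* HMAC-SHA1 via the ipad/opad split that the CCP descriptors
     * perform in hardware; assumes key_len <= SHA1_BLOCK. */
    static void hmac_sha1_ref(const uint8_t *key, size_t key_len,
                              const uint8_t *msg, size_t msg_len,
                              uint8_t digest[SHA_DIGEST_LENGTH])
    {
            uint8_t k_ipad[SHA1_BLOCK] = { 0 };
            uint8_t k_opad[SHA1_BLOCK] = { 0 };
            uint8_t inner[SHA_DIGEST_LENGTH];
            SHA_CTX ctx;
            size_t i;

            memcpy(k_ipad, key, key_len);
            memcpy(k_opad, key, key_len);
            for (i = 0; i < SHA1_BLOCK; i++) {
                    k_ipad[i] ^= 0x36;  /* HMAC_IPAD_VALUE */
                    k_opad[i] ^= 0x5c;  /* HMAC_OPAD_VALUE */
            }

            /* IHash = H((k ^ ipad) || m); the state after the first
             * block is the PHash1 the PMD preloads into the LSB. */
            SHA1_Init(&ctx);
            SHA1_Update(&ctx, k_ipad, SHA1_BLOCK);
            SHA1_Update(&ctx, msg, msg_len);
            SHA1_Final(inner, &ctx);

            /* FHash = H((k ^ opad) || IHash) is the final digest. */
            SHA1_Init(&ctx);
            SHA1_Update(&ctx, k_opad, SHA1_BLOCK);
            SHA1_Update(&ctx, inner, SHA_DIGEST_LENGTH);
            SHA1_Final(digest, &ctx);
    }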

* [PATCH v2 15/20] crypto/ccp: add sha2 family auth algo support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (12 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 14/20] crypto/ccp: add sha1 auth " Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 16/20] crypto/ccp: add sha3 " Ravi Kumar
                     ` (7 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 270 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  48 +++++++
 drivers/crypto/ccp/ccp_pmd_ops.c | 168 ++++++++++++++++++++++++
 3 files changed, 486 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index a137405..52eb76c 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -61,6 +61,34 @@ static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	0x0U, 0x0U,
 };
 
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA224_H7, SHA224_H6,
+	SHA224_H5, SHA224_H4,
+	SHA224_H3, SHA224_H2,
+	SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA256_H7, SHA256_H6,
+	SHA256_H5, SHA256_H4,
+	SHA256_H3, SHA256_H2,
+	SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA384_H7, SHA384_H6,
+	SHA384_H5, SHA384_H4,
+	SHA384_H3, SHA384_H2,
+	SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA512_H7, SHA512_H6,
+	SHA512_H5, SHA512_H4,
+	SHA512_H3, SHA512_H2,
+	SHA512_H1, SHA512_H0,
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -97,6 +125,54 @@ static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA224_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA256_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA384_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA512_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
 static int generate_partial_hash(struct ccp_session *sess)
 {
 
@@ -104,11 +180,13 @@ static int generate_partial_hash(struct ccp_session *sess)
 	uint8_t	opad[sess->auth.block_size];
 	uint8_t *ipad_t, *opad_t;
 	uint32_t *hash_value_be32, hash_temp32[8];
+	uint64_t *hash_value_be64, hash_temp64[8];
 	int i, count;
 
 	opad_t = ipad_t = (uint8_t *)sess->auth.key;
 
 	hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+	hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
 
 	/* considering key size is always equal to block size of algorithm */
 	for (i = 0; i < sess->auth.block_size; i++) {
@@ -132,6 +210,66 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
 	default:
 		CCP_LOG_ERR("Invalid auth algo");
 		return -1;
@@ -347,6 +485,107 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx = (void *)ccp_sha224_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx = (void *)ccp_sha256_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx = (void *)ccp_sha384_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx = (void *)ccp_sha512_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -541,12 +780,32 @@ ccp_auth_slot(struct ccp_session *session)
 
 	switch (session->auth.algo) {
 	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
 		count = 3;
 		/**< op + lsb passthrough cpy to/from*/
 		break;
 	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
 		count = 6;
 		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = 7;
+		/**
+		 * 1. Load PHash1 = H(k ^ ipad) to LSB
+		 * 2. Generate IHash = hash on message with PHash1
+		 * as init values
+		 * 3. Retrieve IHash (2 LSB slots for 384/512)
+		 * 4. Load PHash2 = H(k ^ opad) to LSB
+		 * 5. Generate FHash = hash on IHash with PHash2
+		 * as init value
+		 * 6. Retrieve HMAC output from LSB to host memory
+		 */
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -1508,13 +1767,24 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 
 	switch (session->auth.algo) {
 	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
 		result = ccp_perform_sha(op, cmd_q);
 		b_info->desccnt += 3;
 		break;
 	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 6;
 		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 7;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 42179de..ca1c1a8 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -78,6 +78,18 @@
 #define SHA1_DIGEST_SIZE        20
 #define SHA1_BLOCK_SIZE         64
 
+#define SHA224_DIGEST_SIZE      28
+#define SHA224_BLOCK_SIZE       64
+
+#define SHA256_DIGEST_SIZE      32
+#define SHA256_BLOCK_SIZE       64
+
+#define SHA384_DIGEST_SIZE      48
+#define SHA384_BLOCK_SIZE       128
+
+#define SHA512_DIGEST_SIZE      64
+#define SHA512_BLOCK_SIZE       128
+
 /* SHA LSB initialization values */
 
 #define SHA1_H0		0x67452301UL
@@ -86,6 +98,42 @@
 #define SHA1_H3		0x10325476UL
 #define SHA1_H4		0xc3d2e1f0UL
 
+#define SHA224_H0	0xc1059ed8UL
+#define SHA224_H1	0x367cd507UL
+#define SHA224_H2	0x3070dd17UL
+#define SHA224_H3	0xf70e5939UL
+#define SHA224_H4	0xffc00b31UL
+#define SHA224_H5	0x68581511UL
+#define SHA224_H6	0x64f98fa7UL
+#define SHA224_H7	0xbefa4fa4UL
+
+#define SHA256_H0	0x6a09e667UL
+#define SHA256_H1	0xbb67ae85UL
+#define SHA256_H2	0x3c6ef372UL
+#define SHA256_H3	0xa54ff53aUL
+#define SHA256_H4	0x510e527fUL
+#define SHA256_H5	0x9b05688cUL
+#define SHA256_H6	0x1f83d9abUL
+#define SHA256_H7	0x5be0cd19UL
+
+#define SHA384_H0	0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1	0x629a292a367cd507ULL
+#define SHA384_H2	0x9159015a3070dd17ULL
+#define SHA384_H3	0x152fecd8f70e5939ULL
+#define SHA384_H4	0x67332667ffc00b31ULL
+#define SHA384_H5	0x8eb44a8768581511ULL
+#define SHA384_H6	0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7	0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0	0x6a09e667f3bcc908ULL
+#define SHA512_H1	0xbb67ae8584caa73bULL
+#define SHA512_H2	0x3c6ef372fe94f82bULL
+#define SHA512_H3	0xa54ff53a5f1d36f1ULL
+#define SHA512_H4	0x510e527fade682d1ULL
+#define SHA512_H5	0x9b05688c2b3e6c1fULL
+#define SHA512_H6	0x1f83d9abfb41bd6bULL
+#define SHA512_H7	0x5be0cd19137e2179ULL
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 19d0491..57d202f 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -81,6 +81,174 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
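
Applications reach these algorithms through the generic cryptodev auth
transform. A minimal sketch of a session transform for SHA-256 HMAC
matching the capability entry above (written against the DPDK
17.11-era API; the key buffer and helper name are illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <rte_crypto_sym.h>

    static uint8_t hmac_key[64];  /* 1..64 bytes per the advertised range */

    /* Build an auth-only transform; pass it to session creation as the
     * single xform in the chain. */
    static struct rte_crypto_sym_xform
    sha256_hmac_xform(void)
    {
            struct rte_crypto_sym_xform xform;

            memset(&xform, 0, sizeof(xform));
            xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
            xform.next = NULL;
            xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
            xform.auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
            xform.auth.key.data = hmac_key;
            xform.auth.key.length = sizeof(hmac_key);
            xform.auth.digest_length = 32;  /* SHA256_DIGEST_SIZE */
            return xform;
    }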

* [PATCH v2 16/20] crypto/ccp: add sha3 family auth algo support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (13 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 15/20] crypto/ccp: add sha2 family " Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 17/20] crypto/ccp: add cpu based md5 and sha2 " Ravi Kumar
                     ` (6 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c       | 667 +++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h       |  22 ++
 drivers/crypto/ccp/ccp_pmd_ops.c      | 168 +++++++++
 lib/librte_cryptodev/rte_crypto_sym.h |  17 +
 4 files changed, 873 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 52eb76c..e390fe0 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -89,6 +89,74 @@ uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
 	SHA512_H1, SHA512_H0,
 };
 
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+	(((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+	uint64_t saved;
+	/**<
+	 * the portion of the input message that we
+	 * didn't consume yet
+	 */
+	union {
+		uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+		/**< Keccak's state */
+		uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+		/**< total context size of 200 bytes */
+	};
+	unsigned int byteIndex;
+	/**<
+	 * 0..7--the next byte after the set one
+	 * (starts from 0; 0--none are buffered)
+	 */
+	unsigned int wordIndex;
+	/**<
+	 * 0..24--the next word to integrate input
+	 * (starts from 0)
+	 */
+	unsigned int capacityWords;
+	/**<
+	 * the double size of the hash output in
+	 * words (e.g. 16 for Keccak 512)
+	 */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+	(((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+	SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+	SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+	SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+	SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+	SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+	SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+	SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+	SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+	SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+	SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+	1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+	18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+	10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+	14, 22, 9, 6, 1
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -173,6 +241,223 @@ static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
+static void
+keccakf(uint64_t s[25])
+{
+	int i, j, round;
+	uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
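+	/* 24 rounds of the Keccak-f[1600] permutation: Theta, Rho-Pi,
+	 * Chi and Iota steps over the 5x5 array of 64-bit lanes. */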
+	for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+		/* Theta */
+		for (i = 0; i < 5; i++)
+			bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+				s[i + 20];
+
+		for (i = 0; i < 5; i++) {
+			t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+			for (j = 0; j < 25; j += 5)
+				s[j + i] ^= t;
+		}
+
+		/* Rho Pi */
+		t = s[1];
+		for (i = 0; i < 24; i++) {
+			j = keccakf_piln[i];
+			bc[0] = s[j];
+			s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+			t = bc[0];
+		}
+
+		/* Chi */
+		for (j = 0; j < 25; j += 5) {
+			for (i = 0; i < 5; i++)
+				bc[i] = s[j + i];
+			for (i = 0; i < 5; i++)
+				s[j + i] ^= (~bc[(i + 1) % 5]) &
+					    bc[(i + 2) % 5];
+		}
+
+		/* Iota */
+		s[0] ^= keccakf_rndc[round];
+	}
+}
+
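+/*
+ * sha3_InitNNN: zero the sponge and record the capacity in 64-bit
+ * words (capacity = twice the digest size, e.g. 2 * 224 / 64 = 7
+ * words for SHA3-224); the rate is the remaining words of the state.
+ */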
+static void
+sha3_Init224(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+
+/* Absorb input into the sponge: buffer partial words in 'saved', XOR
+ * whole words into the state and run keccakf() each time a full rate
+ * block has been absorbed. The SHA-3 padding block
+ * (0x01 || 0x00* || 0x80; the first and last bytes are always present
+ * but can be the same byte) is applied only at finalization, which is
+ * not performed here.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+	unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+	size_t words;
+	unsigned int tail;
+	size_t i;
+	const uint8_t *buf = bufIn;
+
+	if (len < old_tail) {
+		while (len--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+		return;
+	}
+
+	if (old_tail) {
+		len -= old_tail;
+		while (old_tail--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+
+		ctx->s[ctx->wordIndex] ^= ctx->saved;
+		ctx->byteIndex = 0;
+		ctx->saved = 0;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	words = len / sizeof(uint64_t);
+	tail = len - words * sizeof(uint64_t);
+
+	for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+		const uint64_t t = (uint64_t) (buf[0]) |
+			((uint64_t) (buf[1]) << 8 * 1) |
+			((uint64_t) (buf[2]) << 8 * 2) |
+			((uint64_t) (buf[3]) << 8 * 3) |
+			((uint64_t) (buf[4]) << 8 * 4) |
+			((uint64_t) (buf[5]) << 8 * 5) |
+			((uint64_t) (buf[6]) << 8 * 6) |
+			((uint64_t) (buf[7]) << 8 * 7);
+		ctx->s[ctx->wordIndex] ^= t;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	while (tail--)
+		ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
+
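+/*
+ * The partial_hash_sha3_* helpers absorb exactly one rate-sized block
+ * of ipad/opad material and copy out the resulting sponge state in
+ * reverse byte order, the intermediate-context format the CCP expects.
+ */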
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init224(ctx);
+	sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init256(ctx);
+	sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init384(ctx);
+	sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init512(ctx);
+	sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
 static int generate_partial_hash(struct ccp_session *sess)
 {
 
@@ -182,6 +467,7 @@ static int generate_partial_hash(struct ccp_session *sess)
 	uint32_t *hash_value_be32, hash_temp32[8];
 	uint64_t *hash_value_be64, hash_temp64[8];
 	int i, count;
+	uint8_t *hash_value_sha3;
 
 	opad_t = ipad_t = (uint8_t *)sess->auth.key;
 
@@ -225,6 +511,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_224(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha3_224(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA256_HMAC:
 		count = SHA256_DIGEST_SIZE >> 2;
 
@@ -240,6 +536,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_256(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_256(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 		count = SHA512_DIGEST_SIZE >> 3;
 
@@ -255,6 +561,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be64++)
 			*hash_value_be64 = hash_temp64[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_384(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_384(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA512_HMAC:
 		count = SHA512_DIGEST_SIZE >> 3;
 
@@ -270,6 +586,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be64++)
 			*hash_value_be64 = hash_temp64[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_512(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_512(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	default:
 		CCP_LOG_ERR("Invalid auth algo");
 		return -1;
@@ -510,6 +836,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+		if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA256:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -535,6 +885,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+		if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA384:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -560,6 +934,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+		if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA512:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -585,7 +983,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
-
+	case RTE_CRYPTO_AUTH_SHA3_512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+		if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -806,6 +1227,26 @@ ccp_auth_slot(struct ccp_session *session)
 		 * 6. Retrieve HMAC output from LSB to host memory
 		 */
 		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		count = 1;
+		/**< only op; ctx and dst are in host memory */
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		count = 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		count = 4;
+		/**
+		 * 1. Op to perform Ihash
+		 * 2-3. Retrieve Ihash from LSB to host memory
+		 *      (2 passthrough slots for 384/512)
+		 * 4. Perform final hash (digest written to host memory)
+		 */
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -1196,6 +1637,213 @@ ccp_perform_sha(struct rte_crypto_op *op,
 }
 
 static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+		      struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	struct ccp_passthru pst;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+						   *)session->auth.pre_compute);
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* desc1 for the SHA3 Ihash operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+	CCP_CMD_DST_HI(desc) = 0;
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Intermediate Hash value retrieve */
+	if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	/* SHA engine command descriptor for FinalHash */
+	ctx_paddr += CCP_SHA3_CTX_SIZE;
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+		dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+		CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+		dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+	} else {
+		CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+	}
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
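+/*
+ * Plain SHA-3 takes a single SHA engine op: the sponge context is
+ * supplied from host memory via the key pointer and the digest is
+ * written directly to the appended mbuf area, so no LSB passthrough
+ * retrieval is needed.
+ */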
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *ctx_addr, *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	ctx_addr = session->auth.sha3_ctx;
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for SHA3 operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
 ccp_perform_aes_cmac(struct rte_crypto_op *op,
 		     struct ccp_queue *cmd_q)
 {
@@ -1785,6 +2433,23 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 7;
 		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		result = ccp_perform_sha3(op, cmd_q);
+		b_info->desccnt += 1;
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index ca1c1a8..8459b71 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -80,15 +80,19 @@
 
 #define SHA224_DIGEST_SIZE      28
 #define SHA224_BLOCK_SIZE       64
+#define SHA3_224_BLOCK_SIZE     144
 
 #define SHA256_DIGEST_SIZE      32
 #define SHA256_BLOCK_SIZE       64
+#define SHA3_256_BLOCK_SIZE     136
 
 #define SHA384_DIGEST_SIZE      48
 #define SHA384_BLOCK_SIZE       128
+#define SHA3_384_BLOCK_SIZE	104
 
 #define SHA512_DIGEST_SIZE      64
 #define SHA512_BLOCK_SIZE       128
+#define SHA3_512_BLOCK_SIZE     72
 
 /* SHA LSB initialization values */
 
@@ -386,4 +390,22 @@ int process_ops_to_dequeue(struct ccp_qp *qp,
 			   struct rte_crypto_op **op,
 			   uint16_t nb_ops);
 
+
+/**
+ * APIs for SHA3 partial hash generation
+ * @param data_in buffer over which the partial hash is computed
+ * @param data_out buffer to which the partial hash result is written,
+ * in the big-endian format expected by the CCP
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+			  uint8_t *data_out);
+
 #endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 57d202f..51ad304 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -123,6 +123,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 1,
+					 .max = 144,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA256 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -165,6 +207,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 1,
+					 .max = 136,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA384 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -207,6 +291,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 1,
+					 .max = 104,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA512 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -249,6 +375,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-512 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 1,
+					 .max = 72,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
 	{	/* AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index c981f0b..4c3e60a 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -273,6 +273,23 @@ enum rte_crypto_auth_algorithm {
 	RTE_CRYPTO_AUTH_ZUC_EIA3,
 	/**< ZUC algorithm in EIA3 mode */
 
+	RTE_CRYPTO_AUTH_SHA3_224,
+	/**< 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	/**< HMAC using 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256,
+	/**< 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	/**< HMAC using 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384,
+	/**< 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	/**< HMAC using 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512,
+	/**< 512 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+	/**< HMAC using 512 bit SHA3 algorithm. */
+
 	RTE_CRYPTO_AUTH_LIST_END
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
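
The SHA-3 block sizes advertised in the capability table (144, 136,
104 and 72 bytes) follow directly from the Keccak-f[1600] sponge
geometry that capacityWords encodes: the rate (block size) is the
200-byte state minus twice the digest size. A standalone arithmetic
check (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            /* Keccak-f[1600] state is 200 bytes; SHA-3 reserves a
             * capacity of twice the digest size, the rest is rate. */
            const int digest_bytes[] = { 28, 32, 48, 64 };
            int i;

            for (i = 0; i < 4; i++)
                    printf("SHA3-%d block size: %d bytes\n",
                           digest_bytes[i] * 8,
                           200 - 2 * digest_bytes[i]);
            return 0;  /* prints 144, 136, 104, 72 */
    }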

* [PATCH v2 17/20] crypto/ccp: add cpu based md5 and sha2 family auth algo support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (14 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 16/20] crypto/ccp: add sha3 " Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-08 17:36     ` De Lara Guarch, Pablo
  2018-01-05  9:39   ` [PATCH v2 18/20] doc: add document for AMD CCP crypto poll mode driver Ravi Kumar
                     ` (5 subsequent siblings)
  21 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Auth operations can be performed on the CPU without offloading
to the CCP engine if CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH is
enabled in the DPDK configuration.
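
For example, both options would be set in config/common_base before
building (shown for illustration; both default to "n"):

    CONFIG_RTE_LIBRTE_PMD_CCP=y
    CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y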

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 config/common_base                   |   1 +
 drivers/crypto/ccp/ccp_crypto.c      | 278 ++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h      |   5 +-
 drivers/crypto/ccp/ccp_pmd_ops.c     |  23 +++
 drivers/crypto/ccp/ccp_pmd_private.h |  10 ++
 5 files changed, 312 insertions(+), 5 deletions(-)

diff --git a/config/common_base b/config/common_base
index 88826c8..2974581 100644
--- a/config/common_base
+++ b/config/common_base
@@ -560,6 +560,7 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 # Compile PMD for AMD CCP crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_CCP=n
+CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
 
 #
 # Compile PMD for Marvell Crypto device
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index e390fe0..407094d 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -53,6 +53,12 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+#endif
+
 /* SHA initial context values */
 static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	SHA1_H4, SHA1_H3,
@@ -786,6 +792,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - MD5_DIGEST_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		sess->auth.block_size = MD5_BLOCK_SIZE;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+#endif
 	case RTE_CRYPTO_AUTH_SHA1:
 		sess->auth.engine = CCP_ENGINE_SHA;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
@@ -795,6 +812,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
 			return -1;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -810,6 +838,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA224:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
@@ -820,6 +849,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
@@ -835,6 +875,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_224:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
@@ -869,6 +910,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
@@ -884,6 +936,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_256:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
@@ -918,6 +971,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
@@ -933,6 +997,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_384:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
@@ -967,6 +1032,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
@@ -982,6 +1058,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_512:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
@@ -1012,7 +1089,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.engine = CCP_ENGINE_AES;
 		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
 		sess->auth.key_length = auth_xform->key.length;
-		/**<padding and hash result*/
+		/* padding and hash result */
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = AES_BLOCK_SIZE;
 		sess->auth.block_size = AES_BLOCK_SIZE;
@@ -1208,14 +1285,22 @@ ccp_auth_slot(struct ccp_session *session)
 		count = 3;
 		/**< op + lsb passthrough cpy to/from*/
 		break;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		break;
+#endif
 	case CCP_AUTH_ALGO_SHA1_HMAC:
 	case CCP_AUTH_ALGO_SHA224_HMAC:
 	case CCP_AUTH_ALGO_SHA256_HMAC:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 		count = 6;
+#endif
 		break;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 	case CCP_AUTH_ALGO_SHA512_HMAC:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 		count = 7;
+#endif
 		/**
 		 * 1. Load PHash1 = H(k ^ ipad); to LSB
 	 * 2. generate IHash = H(hash on message with PHash1
@@ -1322,6 +1407,122 @@ ccp_compute_slot_count(struct ccp_session *session)
 	return count;
 }
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+static int
+algo_select(int sessalgo,
+	    const EVP_MD **algo)
+{
+	int res = 0;
+
+	switch (sessalgo) {
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		*algo = EVP_md5();
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		*algo = EVP_sha1();
+		break;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		*algo = EVP_sha224();
+		break;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		*algo = EVP_sha256();
+		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		*algo = EVP_sha384();
+		break;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		*algo = EVP_sha512();
+		break;
+	default:
+		res = -EINVAL;
+		break;
+	}
+	return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+		      __rte_unused uint8_t *iv,
+		      EVP_PKEY *pkey,
+		      int srclen,
+		      EVP_MD_CTX *ctx,
+		      const EVP_MD *algo,
+		      uint16_t d_len)
+{
+	size_t dstlen;
+	unsigned char temp_dst[64];
+
+	if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+		goto process_auth_err;
+
+	memcpy(dst, temp_dst, d_len);
+	return 0;
+process_auth_err:
+	CCP_LOG_ERR("Process cpu auth failed");
+	return -EINVAL;
+}
+
+static int
+cpu_crypto_auth(struct ccp_qp *qp,
+		struct rte_crypto_op *op,
+		struct ccp_session *sess,
+		EVP_MD_CTX *ctx)
+{
+	uint8_t *src, *dst;
+	int srclen, status;
+	struct rte_mbuf *mbuf_src, *mbuf_dst;
+	const EVP_MD *algo = NULL;
+	EVP_PKEY *pkey;
+
+	algo_select(sess->auth.algo, &algo);
+	pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+				    sess->auth.key_length);
+	mbuf_src = op->sym->m_src;
+	mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+	srclen = op->sym->auth.data.length;
+	src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+				      op->sym->auth.data.offset);
+
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+		dst = qp->temp_digest;
+	} else {
+		dst = op->sym->auth.digest.data;
+		if (dst == NULL) {
+			dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+						     op->sym->auth.data.offset +
+						     op->sym->auth.data.length);
+		}
+	}
+	status = process_cpu_auth_hmac(src, dst, NULL,
+				       pkey, srclen,
+				       ctx,
+				       algo,
+				       sess->auth.digest_length);
+	if (status) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return status;
+	}
+
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(dst, op->sym->auth.digest.data,
+			   sess->auth.digest_length) != 0) {
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+		} else {
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+	} else {
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	}
+	EVP_PKEY_free(pkey);
+	return 0;
+}
+#endif
+
 static void
 ccp_perform_passthru(struct ccp_passthru *pst,
 		     struct ccp_queue *cmd_q)
@@ -2422,14 +2623,24 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 		result = ccp_perform_sha(op, cmd_q);
 		b_info->desccnt += 3;
 		break;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		break;
+#endif
 	case CCP_AUTH_ALGO_SHA1_HMAC:
 	case CCP_AUTH_ALGO_SHA224_HMAC:
 	case CCP_AUTH_ALGO_SHA256_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		break;
+#endif
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 6;
 		break;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 	case CCP_AUTH_ALGO_SHA512_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		break;
+#endif
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 7;
 		break;
@@ -2493,7 +2704,7 @@ ccp_crypto_aead(struct rte_crypto_op *op,
 }
 
 int
-process_ops_to_enqueue(const struct ccp_qp *qp,
+process_ops_to_enqueue(struct ccp_qp *qp,
 		       struct rte_crypto_op **op,
 		       struct ccp_queue *cmd_q,
 		       uint16_t nb_ops,
@@ -2502,11 +2713,22 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 	int i, result = 0;
 	struct ccp_batch_info *b_info;
 	struct ccp_session *session;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+#endif
 
 	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
 		CCP_LOG_ERR("batch info allocation failed");
 		return 0;
 	}
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		/* put back the batch info taken above so it is not leaked */
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		return 0;
+	}
+	b_info->auth_ctr = 0;
+#endif
 	/* populate batch info necessary for dequeue */
 	b_info->op_idx = 0;
 	b_info->lsb_buf_idx = 0;
@@ -2528,6 +2750,11 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 			break;
 		case CCP_CMD_AUTH:
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			b_info->auth_ctr++;
+			result = cpu_crypto_auth(qp, op[i],
+						 session, auth_ctx);
+#endif
 			break;
 		case CCP_CMD_CIPHER_HASH:
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -2537,6 +2764,12 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 			break;
 		case CCP_CMD_HASH_CIPHER:
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			result = cpu_crypto_auth(qp, op[i],
+						 session, auth_ctx);
+			if (op[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+				continue;
+#endif
 			if (result)
 				break;
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -2570,6 +2803,9 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 
 	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
 	return i;
 }
 
@@ -2638,13 +2874,23 @@ static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
 }
 
 static int
-ccp_prepare_ops(struct rte_crypto_op **op_d,
+ccp_prepare_ops(struct ccp_qp *qp,
+		struct rte_crypto_op **op_d,
 		struct ccp_batch_info *b_info,
 		uint16_t nb_ops)
 {
 	int i, min_ops;
 	struct ccp_session *session;
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		return 0;
+	}
+#endif
 	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
 
 	for (i = 0; i < min_ops; i++) {
@@ -2657,8 +2903,25 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 			break;
 		case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_CIPHER_HASH:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			cpu_crypto_auth(qp, op_d[i],
+					session, auth_ctx);
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_HASH_CIPHER:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_COMBINED:
 			ccp_auth_dq_prepare(op_d[i]);
 			break;
@@ -2667,6 +2930,9 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 		}
 	}
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
 	b_info->opcnt -= min_ops;
 	return min_ops;
 }
@@ -2686,6 +2952,10 @@ process_ops_to_dequeue(struct ccp_qp *qp,
 	} else if (rte_ring_dequeue(qp->processed_pkts,
 				    (void **)&b_info))
 		return 0;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	if (b_info->auth_ctr == b_info->opcnt)
+		goto success;
+#endif
 	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
 				       CMD_Q_HEAD_LO_BASE);
 
@@ -2705,7 +2975,7 @@ process_ops_to_dequeue(struct ccp_qp *qp,
 
 
 success:
-	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
+	nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
 	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
 	b_info->desccnt = 0;
 	if (b_info->opcnt > 0) {
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 8459b71..f526329 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -94,6 +94,9 @@
 #define SHA512_BLOCK_SIZE       128
 #define SHA3_512_BLOCK_SIZE     72
 
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX	64
+
 /* SHA LSB initialization values */
 
 #define SHA1_H0		0x67452301UL
@@ -372,7 +375,7 @@ int ccp_compute_slot_count(struct ccp_session *session);
  * @param nb_ops No. of ops to be submitted
  * @return 0 on success otherwise -1
  */
-int process_ops_to_enqueue(const struct ccp_qp *qp,
+int process_ops_to_enqueue(struct ccp_qp *qp,
 			   struct rte_crypto_op **op,
 			   struct ccp_queue *cmd_q,
 			   uint16_t nb_ops,
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 51ad304..84eb1c0 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,29 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
+#endif
 	{	/* SHA1 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
index d2283e8..9136f61 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -32,6 +32,7 @@
 #define _CCP_PMD_PRIVATE_H_
 
 #include <rte_cryptodev.h>
+#include "ccp_crypto.h"
 
 #define CRYPTODEV_NAME_CCP_PMD crypto_ccp
 
@@ -87,6 +88,10 @@ struct ccp_batch_info {
 	phys_addr_t lsb_buf_phys;
 	/**< LSB intermediate buf for passthru */
 	int lsb_buf_idx;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	uint16_t auth_ctr;
+	/**< number of auth-only ops in the batch */
+#endif
 } __rte_cache_aligned;
 
 /**< CCP crypto queue pair */
@@ -107,6 +112,11 @@ struct ccp_qp {
 	/**< Store ops pulled out of queue */
 	struct rte_cryptodev *dev;
 	/**< rte crypto device to which this qp belongs */
+	uint8_t temp_digest[DIGEST_LENGTH_MAX];
+	/**< Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
 } __rte_cache_aligned;
 
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v2 18/20] doc: add document for AMD CCP crypto poll mode driver
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (15 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 17/20] crypto/ccp: add cpu based md5 and sha2 " Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 19/20] test/crypto: add test " Ravi Kumar
                     ` (4 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 doc/guides/cryptodevs/ccp.rst              | 127 +++++++++++++++++++++++++++++
 doc/guides/cryptodevs/features/ccp.ini     |  57 +++++++++++++
 doc/guides/cryptodevs/features/default.ini |  12 +++
 doc/guides/cryptodevs/index.rst            |   1 +
 4 files changed, 197 insertions(+)
 create mode 100644 doc/guides/cryptodevs/ccp.rst
 create mode 100644 doc/guides/cryptodevs/features/ccp.ini

diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
new file mode 100644
index 0000000..59b9281
--- /dev/null
+++ b/doc/guides/cryptodevs/ccp.rst
@@ -0,0 +1,127 @@
+.. Copyright(c) 2017 Advanced Micro Devices, Inc.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+   * Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+AMD CCP Poll Mode Driver
+========================
+
+This code provides the initial implementation of the CCP poll mode driver.
+The CCP poll mode driver library (librte_pmd_ccp) implements support for
+AMD’s cryptographic co-processor (CCP). The CCP PMD is a virtual crypto
+poll mode driver which schedules crypto operations to one or more available
+CCP hardware engines on the platform. The CCP PMD provides poll mode crypto
+driver support for the following hardware accelerator devices::
+
+	AMD Cryptographic Co-processor (0x1456)
+	AMD Cryptographic Co-processor (0x1468)
+
+Features
+--------
+
+CCP crypto PMD has support for:
+
+Cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES_ECB``
+* ``RTE_CRYPTO_CIPHER_AES_CTR``
+* ``RTE_CRYPTO_CIPHER_3DES_CBC``
+
+Hash algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1``
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA384``
+* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA512``
+* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+* ``RTE_CRYPTO_AUTH_AES_CMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_224``
+* ``RTE_CRYPTO_AUTH_SHA3_224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_256``
+* ``RTE_CRYPTO_AUTH_SHA3_256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_384``
+* ``RTE_CRYPTO_AUTH_SHA3_384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_512``
+* ``RTE_CRYPTO_AUTH_SHA3_512_HMAC``
+
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
+Installation
+------------
+
+To compile the CCP PMD, it has to be enabled in the ``config/common_base`` file:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP=y``
+
+The CCP PMD also supports computing authentication on the CPU with the cipher
+offloaded to the CCP. To enable this feature, also set the following option:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y``
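+
+A minimal build sequence (a sketch assuming the make-based build system and an
+x86_64 Linux target; adjust the target to match your toolchain)::
+
+	make config T=x86_64-native-linuxapp-gcc
+	sed -i 's/CONFIG_RTE_LIBRTE_PMD_CCP=n/CONFIG_RTE_LIBRTE_PMD_CCP=y/' build/.config
+	make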
+
+This code was verified on Ubuntu 16.04.
+
+Initialization
+--------------
+
+Bind the CCP devices to the DPDK UIO driver module before running the CCP PMD,
+e.g. for the 0x1456 device::
+
+	cd to the top-level DPDK directory
+	modprobe uio
+	insmod ./build/kmod/igb_uio.ko
+	echo "1022 1456" > /sys/bus/pci/drivers/igb_uio/new_id
+
+Another way to bind the CCP devices to the DPDK UIO driver is by using the
+``dpdk-devbind.py`` script. The following command assumes a BDF of
+``0000:09:00.2``::
+
+	cd to the top-level DPDK directory
+	./usertools/dpdk-devbind.py -b igb_uio 0000:09:00.2
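+
+To confirm the binding, list the devices attached to DPDK-compatible drivers::
+
+	./usertools/dpdk-devbind.py --status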
+
+To verify with real traffic, the l2fwd-crypto example can be used with the following command:
+
+.. code-block:: console
+
+	sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp" -- -p 0x1
+	--chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo AES_CBC
+	--cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+	--iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
+	--auth_op GENERATE --auth_algo SHA1_HMAC
+	--auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+	:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+	:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+
+Limitations
+-----------
+
+* Chained mbufs are not supported
+* MD5_HMAC is supported only if ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y`` is enabled in the configuration
diff --git a/doc/guides/cryptodevs/features/ccp.ini b/doc/guides/cryptodevs/features/ccp.ini
new file mode 100644
index 0000000..add4bd8
--- /dev/null
+++ b/doc/guides/cryptodevs/features/ccp.ini
@@ -0,0 +1,57 @@
+;
+; Supported features of the 'ccp' crypto poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto       = Y
+Sym operation chaining = Y
+HW Accelerated         = Y
+
+;
+; Supported crypto algorithms of the 'ccp' crypto driver.
+;
+[Cipher]
+AES CBC (128)  = Y
+AES CBC (192)  = Y
+AES CBC (256)  = Y
+AES ECB (128)  = Y
+AES ECB (192)  = Y
+AES ECB (256)  = Y
+AES CTR (128)  = Y
+AES CTR (192)  = Y
+AES CTR (256)  = Y
+3DES CBC       = Y
+
+;
+; Supported authentication algorithms of the 'ccp' crypto driver.
+;
+[Auth]
+MD5 HMAC       = Y
+SHA1           = Y
+SHA1 HMAC      = Y
+SHA224         = Y
+SHA224 HMAC    = Y
+SHA256         = Y
+SHA256 HMAC    = Y
+SHA384         = Y
+SHA384 HMAC    = Y
+SHA512         = Y
+SHA512 HMAC    = Y
+AES CMAC       = Y
+SHA3_224       = Y
+SHA3_224 HMAC  = Y
+SHA3_256       = Y
+SHA3_256 HMAC  = Y
+SHA3_384       = Y
+SHA3_384 HMAC  = Y
+SHA3_512       = Y
+SHA3_512 HMAC  = Y
+
+;
+; Supported AEAD algorithms of the 'ccp' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 18d66cb..54c90c1 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -27,6 +27,9 @@ NULL           =
 AES CBC (128)  =
 AES CBC (192)  =
 AES CBC (256)  =
+AES ECB (128)  =
+AES ECB (192)  =
+AES ECB (256)  =
 AES CTR (128)  =
 AES CTR (192)  =
 AES CTR (256)  =
@@ -61,6 +64,15 @@ AES GMAC     =
 SNOW3G UIA2  =
 KASUMI F9    =
 ZUC EIA3     =
+AES CMAC     =
+SHA3_224       =
+SHA3_224 HMAC  =
+SHA3_256       =
+SHA3_256 HMAC  =
+SHA3_384       =
+SHA3_384 HMAC  =
+SHA3_512       =
+SHA3_512 HMAC  =
 
 ;
 ; Supported AEAD algorithms of a default crypto driver.
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index 6d4e15b..3202b48 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -39,6 +39,7 @@ Crypto Device Drivers
     aesni_mb
     aesni_gcm
     armv8
+    ccp
     dpaa2_sec
     dpaa_sec
     kasumi
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v2 19/20] test/crypto: add test for AMD CCP crypto poll mode driver
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (16 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 18/20] doc: add document for AMD CCP crypto poll mode driver Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-05  9:39   ` [PATCH v2 20/20] doc: add ccp crypto poll mode driver to release notes Ravi Kumar
                     ` (3 subsequent siblings)
  21 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 test/test/test_cryptodev.c                   | 161 +++++++++++++++++++++++++++
 test/test/test_cryptodev.h                   |   1 +
 test/test/test_cryptodev_aes_test_vectors.h  |  93 ++++++++++------
 test/test/test_cryptodev_blockcipher.c       |   9 +-
 test/test/test_cryptodev_blockcipher.h       |   1 +
 test/test/test_cryptodev_des_test_vectors.h  |  42 ++++---
 test/test/test_cryptodev_hash_test_vectors.h |  60 ++++++----
 7 files changed, 301 insertions(+), 66 deletions(-)

diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 1bed65d..d99be69 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -364,6 +364,23 @@ testsuite_setup(void)
 		}
 	}
 
+	/* Create a CCP device if required */
+	if (gbl_driver_id == rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD))) {
+		nb_devs = rte_cryptodev_device_count_by_driver(
+				rte_cryptodev_driver_id_get(
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD)));
+		if (nb_devs < 1) {
+			ret = rte_vdev_init(
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD),
+				NULL);
+
+			TEST_ASSERT(ret == 0, "Failed to create "
+				"instance of pmd : %s",
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+		}
+	}
+
 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
 	if (gbl_driver_id == rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
@@ -1753,6 +1770,44 @@ test_AES_cipheronly_openssl_all(void)
 }
 
 static int
+test_AES_chain_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AES_CHAIN_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_AES_cipheronly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_qat_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1924,6 +1979,25 @@ test_authonly_openssl_all(void)
 }
 
 static int
+test_authonly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AUTHONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_armv8_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -4999,6 +5073,44 @@ test_3DES_cipheronly_dpaa2_sec_all(void)
 }
 
 static int
+test_3DES_chain_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_3DES_CHAIN_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_3DES_cipheronly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_3DES_cipheronly_qat_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -9580,6 +9692,38 @@ static struct unit_test_suite cryptodev_mrvl_testsuite  = {
 	}
 };
 
+static struct unit_test_suite cryptodev_ccp_testsuite  = {
+	.suite_name = "Crypto Device CCP Unit Test Suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		TEST_CASE_ST(ut_setup, ut_teardown, test_multi_session),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_multi_session_random_usage),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_AES_chain_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_AES_cipheronly_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_3DES_chain_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_3DES_cipheronly_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_authonly_ccp_all),
+
+		/** Negative tests */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			authentication_verify_HMAC_SHA1_fail_data_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			authentication_verify_HMAC_SHA1_fail_tag_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			auth_decryption_AES128CBC_HMAC_SHA1_fail_data_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
 
 static int
 test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
@@ -9801,6 +9945,22 @@ test_cryptodev_dpaa_sec(void /*argv __rte_unused, int argc __rte_unused*/)
 	return unit_test_suite_runner(&cryptodev_dpaa_sec_testsuite);
 }
 
+static int
+test_cryptodev_ccp(void)
+{
+	gbl_driver_id = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+
+	if (gbl_driver_id == -1) {
+		RTE_LOG(ERR, USER1, "CCP PMD must be loaded. Check if "
+				"CONFIG_RTE_LIBRTE_PMD_CCP is enabled "
+				"in the config file to run this testsuite.\n");
+		return TEST_FAILED;
+	}
+
+	return unit_test_suite_runner(&cryptodev_ccp_testsuite);
+}
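+
+/*
+ * Expected invocation, as a sketch (binary path assumed from the legacy
+ * make build, not specified by this patch): start the test application
+ * and run the suite from its prompt:
+ *
+ *	./build/app/test
+ *	RTE>>cryptodev_ccp_autotest
+ */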
+
 REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
 REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
 REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
@@ -9813,3 +9973,4 @@ REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8);
 REGISTER_TEST_COMMAND(cryptodev_sw_mrvl_autotest, test_cryptodev_mrvl);
 REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec);
 REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec);
+REGISTER_TEST_COMMAND(cryptodev_ccp_autotest, test_cryptodev_ccp);
diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
index 26bfbe6..9fa68f9 100644
--- a/test/test/test_cryptodev.h
+++ b/test/test/test_cryptodev.h
@@ -89,6 +89,7 @@
 #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
 #define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
 #define CRYPTODEV_NAME_MRVL_PMD		crypto_mrvl
+#define CRYPTODEV_NAME_CCP_PMD		crypto_ccp
 
 /**
  * Write (spread) data from buffer to mbuf data
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 9c13041..59ea211 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1199,7 +1199,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1212,7 +1213,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1251,7 +1253,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1264,7 +1267,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1277,7 +1281,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1311,7 +1316,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1332,7 +1338,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
@@ -1354,7 +1361,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1374,7 +1382,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1383,7 +1392,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1407,7 +1417,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1470,7 +1481,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1482,7 +1494,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1494,7 +1507,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1507,7 +1521,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1516,7 +1531,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr =
@@ -1526,7 +1542,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
@@ -1541,7 +1558,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC Decryption",
@@ -1553,7 +1571,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption",
@@ -1564,7 +1583,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption Scater gather",
@@ -1583,7 +1603,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC Encryption",
@@ -1595,7 +1616,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC Decryption",
@@ -1607,7 +1629,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Encryption",
@@ -1617,7 +1640,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Decryption",
@@ -1627,7 +1651,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Encryption",
@@ -1639,7 +1664,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Decryption",
@@ -1651,7 +1677,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR Encryption",
@@ -1662,7 +1689,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR Decryption",
@@ -1673,7 +1701,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR Encryption",
@@ -1685,7 +1714,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR Decryption",
@@ -1697,7 +1727,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Encryption (12-byte IV)",
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index 20f3296..8955d51 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -82,6 +82,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 
 	int openssl_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+	int ccp_pmd = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
 	int scheduler_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
 	int armv8_pmd = rte_cryptodev_driver_id_get(
@@ -122,7 +124,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 			driver_id == qat_pmd ||
 			driver_id == openssl_pmd ||
 			driver_id == armv8_pmd ||
-			driver_id == mrvl_pmd) { /* Fall through */
+			driver_id == mrvl_pmd ||
+			driver_id == ccp_pmd) { /* Fall through */
 		digest_len = tdata->digest.len;
 	} else if (driver_id == aesni_mb_pmd ||
 			driver_id == scheduler_pmd) {
@@ -583,6 +586,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
 
 	int openssl_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+	int ccp_pmd = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
 	int dpaa2_sec_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
 	int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
@@ -655,6 +660,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER;
 	else if (driver_id == dpaa2_sec_pmd)
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC;
+	else if (driver_id == ccp_pmd)
+		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_CCP;
 	else if (driver_id == dpaa_sec_pmd)
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC;
 	else if (driver_id == mrvl_pmd)
diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
index 67c78d4..b6824f6 100644
--- a/test/test/test_cryptodev_blockcipher.h
+++ b/test/test/test_cryptodev_blockcipher.h
@@ -55,6 +55,7 @@
 #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC	0x0020 /* DPAA2_SEC flag */
 #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC	0x0040 /* DPAA_SEC flag */
 #define BLOCKCIPHER_TEST_TARGET_PMD_MRVL	0x0080 /* Marvell flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_CCP		0x0100 /* CCP flag */
 
 #define BLOCKCIPHER_TEST_OP_CIPHER	(BLOCKCIPHER_TEST_OP_ENCRYPT | \
 					BLOCKCIPHER_TEST_OP_DECRYPT)
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index d09e23d..1a794c4 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -1072,7 +1072,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1081,19 +1082,22 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
 		.test_data = &triple_des128cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Decryption Digest Verify",
 		.test_data = &triple_des128cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Encryption Digest",
@@ -1103,7 +1107,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1113,21 +1118,24 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
 		.test_data = &triple_des192cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Decryption Digest Verify",
 		.test_data = &triple_des192cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CTR HMAC-SHA1 Encryption Digest",
@@ -1208,7 +1216,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr =
@@ -1217,7 +1226,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
@@ -1229,7 +1239,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC Decryption",
@@ -1238,7 +1249,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC Encryption",
@@ -1248,7 +1260,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC Decryption",
@@ -1258,7 +1271,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 3b840ce..e0a8f2e 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -386,13 +386,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.test_descr = "SHA1 Digest",
 		.test_data = &sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA1 Digest Verify",
 		.test_data = &sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest",
@@ -403,7 +405,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Verify",
@@ -414,19 +417,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA224 Digest",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA224 Digest Verify",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest",
@@ -437,7 +443,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest Verify",
@@ -448,19 +455,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA256 Digest",
 		.test_data = &sha256_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA256 Digest Verify",
 		.test_data = &sha256_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest",
@@ -471,7 +481,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest Verify",
@@ -482,19 +493,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA384 Digest",
 		.test_data = &sha384_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA384 Digest Verify",
 		.test_data = &sha384_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest",
@@ -505,7 +519,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest Verify",
@@ -516,19 +531,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA512 Digest",
 		.test_data = &sha512_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA512 Digest Verify",
 		.test_data = &sha512_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest",
@@ -539,7 +557,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest Verify",
@@ -550,7 +569,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v2 20/20] doc: add ccp crypto poll mode driver to release notes
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (17 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 19/20] test/crypto: add test " Ravi Kumar
@ 2018-01-05  9:39   ` Ravi Kumar
  2018-01-08 17:32     ` De Lara Guarch, Pablo
  2018-01-07  9:02   ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Hemant Agrawal
                     ` (2 subsequent siblings)
  21 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2018-01-05  9:39 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 MAINTAINERS                            | 6 ++++++
 doc/guides/rel_notes/release_18_02.rst | 5 +++++
 2 files changed, 11 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index f0baeb4..54734f7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -588,6 +588,12 @@ M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 F: doc/guides/cryptodevs/features/default.ini
 
+AMD CCP Crypto PMD
+M: Ravi Kumar <ravi1.kumar@amd.com>
+F: drivers/crypto/ccp/
+F: doc/guides/cryptodevs/ccp.rst
+F: doc/guides/cryptodevs/features/ccp.ini
+
 ARMv8 Crypto
 M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
 F: drivers/crypto/armv8/
diff --git a/doc/guides/rel_notes/release_18_02.rst b/doc/guides/rel_notes/release_18_02.rst
index 24b67bb..42ebeeb 100644
--- a/doc/guides/rel_notes/release_18_02.rst
+++ b/doc/guides/rel_notes/release_18_02.rst
@@ -41,6 +41,11 @@ New Features
      Also, make sure to start the actual text at the margin.
      =========================================================
 
+* **Added a new crypto poll mode driver for AMD CCP devices.**
+
+  Added the new ``ccp`` crypto driver for AMD CCP devices. See the
+  :doc:`../cryptodevs/ccp` crypto driver guide for more details on
+  this new driver.
 
 API Changes
 -----------
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* Re: [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (18 preceding siblings ...)
  2018-01-05  9:39   ` [PATCH v2 20/20] doc: add ccp crypto poll mode driver to release notes Ravi Kumar
@ 2018-01-07  9:02   ` Hemant Agrawal
  2018-01-08 17:16   ` De Lara Guarch, Pablo
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
  21 siblings, 0 replies; 131+ messages in thread
From: Hemant Agrawal @ 2018-01-07  9:02 UTC (permalink / raw)
  To: Ravi Kumar, dev; +Cc: pablo.de.lara.guarch

Hi Ravi,

On 1/5/2018 3:09 PM, Ravi Kumar wrote:
> diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
> new file mode 100644
> index 0000000..51c5e5b
> --- /dev/null
> +++ b/drivers/crypto/ccp/Makefile
> @@ -0,0 +1,55 @@
> +#
> +#   Copyright(c) 2018 Advanced Micro Devices, Inc.
> +#   All rights reserved.
> +#
> +#   Redistribution and use in source and binary forms, with or without
> +#   modification, are permitted provided that the following conditions
> +#   are met:
> +#
> +# 	* Redistributions of source code must retain the above copyright
> +# 	notice, this list of conditions and the following disclaimer.
> +# 	* Redistributions in binary form must reproduce the above copyright
> +# 	notice, this list of conditions and the following disclaimer in the
> +# 	documentation and/or other materials provided with the distribution.
> +# 	* Neither the name of the copyright holder nor the names of its
> +# 	contributors may be used to endorse or promote products derived from
> +# 	this software without specific prior written permission.
> +#
> +#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> +#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> +#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> +#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> +#   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> +#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> +#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> +#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> +#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> +#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> +#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Will you please start using the SPDX license tags instead of the full license text?
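
For reference, the whole license block above would then collapse to a
single tag line, e.g. (BSD-3-Clause assumed here, matching the quoted
license text):

  # SPDX-License-Identifier: BSD-3-Clause
  #   Copyright(c) 2018 Advanced Micro Devices, Inc.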

Regards,
Hemant

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (19 preceding siblings ...)
  2018-01-07  9:02   ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Hemant Agrawal
@ 2018-01-08 17:16   ` De Lara Guarch, Pablo
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
  21 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-08 17:16 UTC (permalink / raw)
  To: Ravi Kumar, dev

Hi Ravi,

> -----Original Message-----
> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> Sent: Friday, January 5, 2018 9:40 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> Subject: [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support
> 

Change to "crypto/ccp: support AMD CCP PMD" or maybe "crypto/ccp: add AMD CCP skeleton PMD"

Make sure you make this change for the other patches as well. Instead of saying "crypto/ccp: add X support",
use "crypto/ccp: support X" directly.

Thanks,
Pablo

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v2 03/20] crypto/ccp: add basic pmd ops support for start, stop, config etc
  2018-01-05  9:39   ` [PATCH v2 03/20] crypto/ccp: add basic pmd ops support for start, stop, config etc Ravi Kumar
@ 2018-01-08 17:21     ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-08 17:21 UTC (permalink / raw)
  To: Ravi Kumar, dev



> -----Original Message-----
> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> Sent: Friday, January 5, 2018 9:40 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> Subject: [PATCH v2 03/20] crypto/ccp: add basic pmd ops support for start,
> stop, config etc
> 
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>

Change title to "crypto/ccp: add basic pmd ops".

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v2 12/20] crypto/ccp: add aes-cmac auth algo support
  2018-01-05  9:39   ` [PATCH v2 12/20] crypto/ccp: add aes-cmac auth algo support Ravi Kumar
@ 2018-01-08 17:27     ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-08 17:27 UTC (permalink / raw)
  To: Ravi Kumar, dev

Hi Ravi,

> -----Original Message-----
> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> Sent: Friday, January 5, 2018 9:40 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> Subject: [PATCH v2 12/20] crypto/ccp: add aes-cmac auth algo support
> 
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>

...

> --- a/drivers/crypto/ccp/ccp_pmd_ops.c
> +++ b/drivers/crypto/ccp/ccp_pmd_ops.c
> @@ -39,6 +39,27 @@
>  #include "ccp_crypto.h"
> 
>  static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
> +	{	/*AES-CMAC */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				 .algo = RTE_CRYPTO_AUTH_AES_CMAC,
> +				 .block_size = 16,
> +				 .key_size = {
> +					 .min = 16,
> +					 .max = 32,
> +					 .increment = 8
> +				 },
> +				 .digest_size = {
> +					 .min = 16,
> +					 .max = 16,
> +					 .increment = 0
> +				 },
> +				 .aad_size = { 0 }

Since this is an authentication-only algorithm, there is no need to set the AAD size here.
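
With the field dropped, the entry would read as below (a sketch only;
the omitted aad_size member is zero-initialized by the designated
initializers anyway):

	{	/* AES-CMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				 .algo = RTE_CRYPTO_AUTH_AES_CMAC,
				 .block_size = 16,
				 .key_size = {
					 .min = 16,
					 .max = 32,
					 .increment = 8
				 },
				 .digest_size = {
					 .min = 16,
					 .max = 16,
					 .increment = 0
				 },
			}, }
		}, }
	},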

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v2 20/20] doc: add ccp crypto poll mode driver to release notes
  2018-01-05  9:39   ` [PATCH v2 20/20] doc: add ccp crypto poll mode driver to release notes Ravi Kumar
@ 2018-01-08 17:32     ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-08 17:32 UTC (permalink / raw)
  To: Ravi Kumar, dev

Hi Ravi,

> -----Original Message-----
> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> Sent: Friday, January 5, 2018 9:40 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> Subject: [PATCH v2 20/20] doc: add ccp crypto poll mode driver to release
> notes
> 

I think you can squash this patch directly into the first patch, and then
add ccp.rst and ccp.ini in the patches where you introduce them.

Thanks,
Pablo

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v2 17/20] crypto/ccp: add cpu based md5 and sha2 family auth algo support
  2018-01-05  9:39   ` [PATCH v2 17/20] crypto/ccp: add cpu based md5 and sha2 " Ravi Kumar
@ 2018-01-08 17:36     ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-08 17:36 UTC (permalink / raw)
  To: Ravi Kumar, dev

Hi Ravi,

> -----Original Message-----
> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> Sent: Friday, January 5, 2018 9:40 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> Subject: [PATCH v2 17/20] crypto/ccp: add cpu based md5 and sha2 family
> auth algo support
> 
> Auth operations can be performed on CPU without offloading to CCP if
> CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH is enabled in DPDK
> configuration.
> 
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>

...

> --- a/drivers/crypto/ccp/ccp_crypto.c
> +++ b/drivers/crypto/ccp/ccp_crypto.c

...

>  }
> 
> @@ -2638,13 +2874,23 @@ static inline void ccp_auth_dq_prepare(struct
> rte_crypto_op *op)  }
> 
>  static int
> -ccp_prepare_ops(struct rte_crypto_op **op_d,
> +ccp_prepare_ops(struct ccp_qp *qp,

There is a compilation error if the PMD is enabled and
CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n.

You need to mark the struct ccp_qp *qp parameter as __rte_unused
in case this option is not enabled.
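
Since __rte_unused expands to __attribute__((unused)), which only
suppresses the unused-parameter warning and is harmless when the
parameter is actually used, the simplest fix is to tag it
unconditionally. A sketch of the prototype (qp and op_d come from the
quoted hunks; the remaining parameters are illustrative):

	static int
	ccp_prepare_ops(struct ccp_qp *qp __rte_unused,
			struct rte_crypto_op **op_d,
			struct ccp_batch_info *b_info,
			uint16_t nb_ops);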

Pablo

^ permalink raw reply	[flat|nested] 131+ messages in thread

* [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
                     ` (20 preceding siblings ...)
  2018-01-08 17:16   ` De Lara Guarch, Pablo
@ 2018-01-10  9:42   ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 02/19] crypto/ccp: support ccp device initialization and deinitialization Ravi Kumar
                       ` (19 more replies)
  21 siblings, 20 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 MAINTAINERS                                |  6 +++
 config/common_base                         |  5 +++
 doc/guides/rel_notes/release_18_02.rst     |  5 +++
 drivers/crypto/Makefile                    |  1 +
 drivers/crypto/ccp/Makefile                | 55 ++++++++++++++++++++++++++
 drivers/crypto/ccp/rte_ccp_pmd.c           | 62 ++++++++++++++++++++++++++++++
 drivers/crypto/ccp/rte_pmd_ccp_version.map |  4 ++
 mk/rte.app.mk                              |  2 +
 8 files changed, 140 insertions(+)
 create mode 100644 drivers/crypto/ccp/Makefile
 create mode 100644 drivers/crypto/ccp/rte_ccp_pmd.c
 create mode 100644 drivers/crypto/ccp/rte_pmd_ccp_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index b51c2d0..e609244 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -594,6 +594,12 @@ M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 F: doc/guides/cryptodevs/features/default.ini
 
+AMD CCP Crypto PMD
+M: Ravi Kumar <ravi1.kumar@amd.com>
+F: drivers/crypto/ccp/
+F: doc/guides/cryptodevs/ccp.rst
+F: doc/guides/cryptodevs/features/ccp.ini
+
 ARMv8 Crypto
 M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
 F: drivers/crypto/armv8/
diff --git a/config/common_base b/config/common_base
index e74febe..88826c8 100644
--- a/config/common_base
+++ b/config/common_base
@@ -557,6 +557,11 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
 CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 
 #
+# Compile PMD for AMD CCP crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_CCP=n
+
+#
 # Compile PMD for Marvell Crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n
diff --git a/doc/guides/rel_notes/release_18_02.rst b/doc/guides/rel_notes/release_18_02.rst
index 24b67bb..42ebeeb 100644
--- a/doc/guides/rel_notes/release_18_02.rst
+++ b/doc/guides/rel_notes/release_18_02.rst
@@ -41,6 +41,11 @@ New Features
      Also, make sure to start the actual text at the margin.
      =========================================================
 
+* **Added a new crypto poll mode driver for AMD CCP devices.**
+
+  Added the new ``ccp`` crypto driver for AMD CCP devices. See the
+  :doc:`../cryptodevs/ccp` crypto driver guide for more details on
+  this new driver.
 
 API Changes
 -----------
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 628bd14..fe41edd 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -16,5 +16,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 0000000..51c5e5b
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,55 @@
+#
+#   Copyright(c) 2018 Advanced Micro Devices, Inc.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+# 	* Redistributions of source code must retain the above copyright
+# 	notice, this list of conditions and the following disclaimer.
+# 	* Redistributions in binary form must reproduce the above copyright
+# 	notice, this list of conditions and the following disclaimer in the
+# 	documentation and/or other materials provided with the distribution.
+# 	* Neither the name of the copyright holder nor the names of its
+# 	contributors may be used to endorse or promote products derived from
+# 	this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library include paths
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 0000000..6fa14bd
--- /dev/null
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,62 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+
+uint8_t ccp_cryptodev_driver_id;
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev __rte_unused)
+{
+	return 0;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev __rte_unused)
+{
+	return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+	.probe = cryptodev_ccp_probe,
+	.remove = cryptodev_ccp_remove
+};
+
+static struct cryptodev_driver ccp_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+	"max_nb_queue_pairs=<int> max_nb_sessions=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv,
+			       ccp_cryptodev_driver_id);
diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 0000000..179140f
--- /dev/null
+++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,4 @@
+DPDK_18.02 {
+
+	local: *;
+};
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 6a6a745..0453b7f 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -191,6 +191,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC)   += -lrte_bus_dpaa
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC)   += -lrte_pmd_dpaa_sec
 endif # CONFIG_RTE_LIBRTE_DPAA_BUS
 
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CCP)   += -lrte_pmd_ccp -lcrypto
+
 endif # CONFIG_RTE_LIBRTE_CRYPTODEV
 
 ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v3 02/19] crypto/ccp: support ccp device initialization and deinitialization
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 03/19] crypto/ccp: support basic pmd ops Ravi Kumar
                       ` (18 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

The CCP PMD is a virtual crypto PMD which schedules requests
across all of the available hardware CCP engines. The PMD builds
a linked list of the detected CCP engines, which are handed out
in a round-robin fashion to the CPU cores requesting crypto
operations.
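
In outline, the selection amounts to the sketch below (ccp_list is the
TAILQ of probed devices declared in ccp_dev.h; the helper name is
hypothetical and not part of this patch):

	/* Return the device after 'last' in round-robin order,
	 * wrapping back to the head of the list.
	 */
	static struct ccp_device *
	ccp_get_next_device(struct ccp_device *last)
	{
		struct ccp_device *dev;

		dev = last ? TAILQ_NEXT(last, next) : NULL;
		if (dev == NULL)
			dev = TAILQ_FIRST(&ccp_list);
		return dev;
	}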

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/Makefile          |   3 +
 drivers/crypto/ccp/ccp_dev.c         | 787 +++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h         | 310 ++++++++++++++
 drivers/crypto/ccp/ccp_pci.c         | 262 ++++++++++++
 drivers/crypto/ccp/ccp_pci.h         |  53 +++
 drivers/crypto/ccp/ccp_pmd_ops.c     |  55 +++
 drivers/crypto/ccp/ccp_pmd_private.h |  82 ++++
 drivers/crypto/ccp/rte_ccp_pmd.c     | 151 ++++++-
 8 files changed, 1701 insertions(+), 2 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp_dev.c
 create mode 100644 drivers/crypto/ccp/ccp_dev.h
 create mode 100644 drivers/crypto/ccp/ccp_pci.c
 create mode 100644 drivers/crypto/ccp/ccp_pci.h
 create mode 100644 drivers/crypto/ccp/ccp_pmd_ops.c
 create mode 100644 drivers/crypto/ccp/ccp_pmd_private.h

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 51c5e5b..5e58c31 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -51,5 +51,8 @@ EXPORT_MAP := rte_pmd_ccp_version.map
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 0000000..5af2b49
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,787 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+			   uint32_t queue_size,
+			   int socket_id)
+{
+	const struct rte_memzone *mz;
+	unsigned int memzone_flags = 0;
+	const struct rte_memseg *ms;
+
+	mz = rte_memzone_lookup(queue_name);
+	if (mz != 0)
+		return mz;
+
+	ms = rte_eal_get_physmem_layout();
+	switch (ms[0].hugepage_sz) {
+	case(RTE_PGSIZE_2M):
+		memzone_flags = RTE_MEMZONE_2MB;
+		break;
+	case(RTE_PGSIZE_1G):
+		memzone_flags = RTE_MEMZONE_1GB;
+		break;
+	case(RTE_PGSIZE_16M):
+		memzone_flags = RTE_MEMZONE_16MB;
+		break;
+	case(RTE_PGSIZE_16G):
+		memzone_flags = RTE_MEMZONE_16GB;
+		break;
+	default:
+		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
+	}
+
+	return rte_memzone_reserve_aligned(queue_name,
+					   queue_size,
+					   socket_id,
+					   memzone_flags,
+					   queue_size);
+}
+
+/* bitmap support apis */
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
+}
+
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+	unsigned long first_zero;
+
+	first_zero = __builtin_ffsl(~word);
+	return first_zero ? (first_zero - 1) :
+		BITS_PER_WORD;
+}
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+	uint32_t i;
+	uint32_t nwords = 0;
+
+	nwords = (limit - 1) / BITS_PER_WORD + 1;
+	for (i = 0; i < nwords; i++) {
+		if (addr[i] == 0UL)
+			return i * BITS_PER_WORD;
+		if (addr[i] < ~(0UL))
+			break;
+	}
+	return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_WORD;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_clear >= 0) {
+		*p &= ~mask_to_clear;
+		len -= bits_to_clear;
+		bits_to_clear = BITS_PER_WORD;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p &= ~mask_to_clear;
+	}
+}
+
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+		   unsigned long nbits,
+		   unsigned long start,
+		   unsigned long invert)
+{
+	unsigned long tmp;
+
+	if (!nbits || start >= nbits)
+		return nbits;
+
+	tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+	/* Handle 1st word. */
+	tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+	start = ccp_round_down(start, BITS_PER_WORD);
+
+	while (!tmp) {
+		start += BITS_PER_WORD;
+		if (start >= nbits)
+			return nbits;
+
+		tmp = addr[start / BITS_PER_WORD] ^ invert;
+	}
+
+	return RTE_MIN(start + (ffs(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+		  unsigned long size,
+		  unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+		       unsigned long size,
+		       unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+			       unsigned long size,
+			       unsigned long start,
+			       unsigned int nr)
+{
+	unsigned long index, end, i;
+
+again:
+	index = ccp_find_next_zero_bit(map, size, start);
+
+	end = index + nr;
+	if (end > size)
+		return end;
+	i = ccp_find_next_bit(map, end, index);
+	if (i < end) {
+		start = i + 1;
+		goto again;
+	}
+	return index;
+}
+
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+	struct ccp_device *ccp;
+	int start;
+
+	/* First look at the map for the queue */
+	if (cmd_q->lsb >= 0) {
+		start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+								 LSB_SIZE, 0,
+								 count);
+		if (start < LSB_SIZE) {
+			ccp_bitmap_set(cmd_q->lsbmap, start, count);
+			return start + cmd_q->lsb * LSB_SIZE;
+		}
+	}
+
+	/* try to get an entry from the shared blocks */
+	ccp = cmd_q->dev;
+
+	rte_spinlock_lock(&ccp->lsb_lock);
+
+	start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+						    MAX_LSB_CNT * LSB_SIZE,
+						    0, count);
+	if (start <= MAX_LSB_CNT * LSB_SIZE) {
+		ccp_bitmap_set(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+		return start * LSB_ITEM_SIZE;
+	}
+	CCP_LOG_ERR("NO LSBs available");
+
+	rte_spinlock_unlock(&ccp->lsb_lock);
+
+	return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+	     unsigned int start,
+	     unsigned int count)
+{
+	int lsbno = start / LSB_SIZE;
+
+	if (!start)
+		return;
+
+	if (cmd_q->lsb == lsbno) {
+		/* An entry from the private LSB */
+		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+	} else {
+		/* From the shared LSBs */
+		struct ccp_device *ccp = cmd_q->dev;
+
+		rte_spinlock_lock(&ccp->lsb_lock);
+		ccp_bitmap_clear(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+	}
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+	int q_mask = 1 << cmd_q->id;
+	int weight = 0;
+	int j;
+
+	/* Build a bit mask to know which LSBs
+	 * this queue has access to.
+	 * Don't bother with segment 0
+	 * as it has special
+	 * privileges.
+	 */
+	cmd_q->lsbmask = 0;
+	status >>= LSB_REGION_WIDTH;
+	for (j = 1; j < MAX_LSB_CNT; j++) {
+		if (status & q_mask)
+			ccp_set_bit(&cmd_q->lsbmask, j);
+
+		status >>= LSB_REGION_WIDTH;
+	}
+
+	for (j = 0; j < MAX_LSB_CNT; j++)
+		if (ccp_get_bit(&cmd_q->lsbmask, j))
+			weight++;
+
+	printf("Queue %d can access %d LSB regions  of mask  %lu\n",
+	       (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+	return weight ? 0 : -EINVAL;
+}
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+			     int lsb_cnt, int n_lsbs,
+			     unsigned long *lsb_pub)
+{
+	unsigned long qlsb = 0;
+	int bitno = 0;
+	int qlsb_wgt = 0;
+	int i, j;
+
+	/* For each queue:
+	 * If the count of potential LSBs available to a queue matches the
+	 * ordinal given to us in lsb_cnt:
+	 * Copy the mask of possible LSBs for this queue into "qlsb";
+	 * For each bit in qlsb, see if the corresponding bit in the
+	 * aggregation mask is set; if so, we have a match.
+	 *     If we have a match, clear the bit in the aggregation to
+	 *     mark it as no longer available.
+	 *     If there is no match, clear the bit in qlsb and keep looking.
+	 */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+		qlsb_wgt = 0;
+		for (j = 0; j < MAX_LSB_CNT; j++)
+			if (ccp_get_bit(&cmd_q->lsbmask, j))
+				qlsb_wgt++;
+
+		if (qlsb_wgt == lsb_cnt) {
+			qlsb = cmd_q->lsbmask;
+
+			bitno = ffs(qlsb) - 1;
+			while (bitno < MAX_LSB_CNT) {
+				if (ccp_get_bit(lsb_pub, bitno)) {
+					/* We found an available LSB
+					 * that this queue can access
+					 */
+					cmd_q->lsb = bitno;
+					ccp_clear_bit(lsb_pub, bitno);
+					break;
+				}
+				ccp_clear_bit(&qlsb, bitno);
+				bitno = ffs(qlsb) - 1;
+			}
+			if (bitno >= MAX_LSB_CNT)
+				return -EINVAL;
+			n_lsbs--;
+		}
+	}
+	return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+	unsigned long lsb_pub = 0, qlsb = 0;
+	int n_lsbs = 0;
+	int bitno;
+	int i, lsb_cnt;
+	int rc = 0;
+
+	rte_spinlock_init(&ccp->lsb_lock);
+
+	/* Create an aggregate bitmap to get a total count of available LSBs */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+	for (i = 0; i < MAX_LSB_CNT; i++)
+		if (ccp_get_bit(&lsb_pub, i))
+			n_lsbs++;
+
+	if (n_lsbs >= ccp->cmd_q_count) {
+		/* We have enough LSBS to give every queue a private LSB.
+		 * Brute force search to start with the queues that are more
+		 * constrained in LSB choice. When an LSB is privately
+		 * assigned, it is removed from the public mask.
+		 * This is an ugly N squared algorithm with some optimization.
+		 */
+		for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+		     lsb_cnt++) {
+			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+							  &lsb_pub);
+			if (rc < 0)
+				return -EINVAL;
+			n_lsbs = rc;
+		}
+	}
+
+	rc = 0;
+	/* What's left of the LSBs, according to the public mask, now become
+	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
+	 * that can't be used as a shared resource, so mark the LSB slots for
+	 * them as "in use".
+	 */
+	qlsb = lsb_pub;
+	bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	while (bitno < MAX_LSB_CNT) {
+		ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+		ccp_set_bit(&qlsb, bitno);
+		bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	}
+
+	return rc;
+}
+
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+	int i;
+	uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+	uint64_t status;
+	struct ccp_queue *cmd_q;
+	const struct rte_memzone *q_mz;
+	void *vaddr;
+
+	if (dev == NULL)
+		return -1;
+
+	dev->id = ccp_dev_id++;
+	dev->qidx = 0;
+	vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+	if (type == CCP_VERSION_5B) {
+		CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+		CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+		for (i = 0; i < 12; i++) {
+			CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+				      CCP_READ_REG(vaddr, TRNG_OUT_REG));
+		}
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+		CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+	}
+	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+	/* Copy the private LSB mask to the public registers */
+	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+	status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+	status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo);
+
+	dev->cmd_q_count = 0;
+	/* Find available queues */
+	qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+	for (i = 0; i < MAX_HW_QUEUES; i++) {
+		if (!(qmr & (1 << i)))
+			continue;
+		cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+		cmd_q->dev = dev;
+		cmd_q->id = i;
+		cmd_q->qidx = 0;
+		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+		cmd_q->reg_base = (uint8_t *)vaddr +
+			CMD_Q_STATUS_INCR * (i + 1);
+
+		/* CCP queue memory */
+		snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+			 "%s_%d_%s_%d_%s",
+			 "ccp_dev",
+			 (int)dev->id, "queue",
+			 (int)cmd_q->id, "mem");
+		q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+						  cmd_q->qsize, SOCKET_ID_ANY);
+		cmd_q->qbase_addr = (void *)q_mz->addr;
+		cmd_q->qbase_desc = (void *)q_mz->addr;
+		cmd_q->qbase_phys_addr =  q_mz->phys_addr;
+
+		cmd_q->qcontrol = 0;
+		/* init control reg to zero */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* Disable the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+		/* Clear the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+			      ALL_INTERRUPTS);
+
+		/* Configure size of each virtual queue accessible to host */
+		cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+		dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+
+		dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+		cmd_q->qcontrol |= (dma_addr_hi << 16);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* create LSB Mask map */
+		if (ccp_find_lsb_regions(cmd_q, status))
+			CCP_LOG_ERR("queue doesn't have lsb regions");
+		cmd_q->lsb = -1;
+
+		rte_atomic64_init(&cmd_q->free_slots);
+		rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+		/* one slot kept unused as a barrier between head and tail */
+	}
+
+	if (ccp_assign_lsbs(dev))
+		CCP_LOG_ERR("Unable to assign lsb region");
+
+	/* pre-allocate LSB slots */
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->cmd_q[i].sb_key =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_iv =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_sha =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+		dev->cmd_q[i].sb_hmac =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+	}
+
+	TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+	return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+	if (dev == NULL)
+		return;
+
+	TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+	      const struct rte_pci_id *ccp_id,
+	      int *type)
+{
+	char filename[PATH_MAX];
+	const struct rte_pci_id *id;
+	uint16_t vendor, device_id;
+	int i;
+	unsigned long tmp;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	vendor = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	device_id = (uint16_t)tmp;
+
+	for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+		if (vendor == id->vendor_id &&
+		    device_id == id->device_id) {
+			*type = i;
+			return 1; /* Matched device */
+		}
+	}
+	return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+		 uint8_t bus, uint8_t devid,
+		 uint8_t function, int ccp_type)
+{
+	struct ccp_device *ccp_dev = NULL;
+	struct rte_pci_device *pci;
+	char filename[PATH_MAX];
+	unsigned long tmp;
+	int uio_fd = -1, i, uio_num;
+	char uio_devname[PATH_MAX];
+	void *map_addr;
+
+	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+			      RTE_CACHE_LINE_SIZE);
+	if (ccp_dev == NULL)
+		goto fail;
+	pci = &(ccp_dev->pci);
+
+	pci->addr.domain = domain;
+	pci->addr.bus = bus;
+	pci->addr.devid = devid;
+	pci->addr.function = function;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.vendor_id = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.device_id = (uint16_t)tmp;
+
+	/* get subsystem_vendor id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+	/* get subsystem_device id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_device",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_device_id = (uint16_t)tmp;
+
+	/* get class_id */
+	snprintf(filename, sizeof(filename), "%s/class",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	/* the least significant 24 bits are valid: class, subclass, prog if */
+	pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+	/* parse resources */
+	snprintf(filename, sizeof(filename), "%s/resource", dirname);
+	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+		goto fail;
+
+	uio_num = ccp_find_uio_devname(dirname);
+	if (uio_num < 0) {
+		/*
+		 * It may take time for the uio device to appear;
+		 * wait here and try again
+		 */
+		usleep(100000);
+		uio_num = ccp_find_uio_devname(dirname);
+		if (uio_num < 0)
+			goto fail;
+	}
+	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+	if (uio_fd < 0)
+		goto fail;
+	if (flock(uio_fd, LOCK_EX | LOCK_NB))
+		goto fail;
+
+	/* Map the PCI memory resource of device */
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+		char devname[PATH_MAX];
+		int res_fd;
+
+		if (pci->mem_resource[i].phys_addr == 0)
+			continue;
+		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+		res_fd = open(devname, O_RDWR);
+		if (res_fd < 0)
+			goto fail;
+		map_addr = mmap(NULL, pci->mem_resource[i].len,
+				PROT_READ | PROT_WRITE,
+				MAP_SHARED, res_fd, 0);
+		if (map_addr == MAP_FAILED)
+			goto fail;
+
+		pci->mem_resource[i].addr = map_addr;
+	}
+
+	/* device is valid, add in list */
+	if (ccp_add_device(ccp_dev, ccp_type)) {
+		ccp_remove_device(ccp_dev);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	CCP_LOG_ERR("CCP Device probe failed");
+	if (uio_fd > 0)
+		close(uio_fd);
+	if (ccp_dev)
+		rte_free(ccp_dev);
+	return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+	int dev_cnt = 0;
+	int ccp_type = 0;
+	struct dirent *d;
+	DIR *dir;
+	int ret = 0;
+	int module_idx = 0;
+	uint16_t domain;
+	uint8_t bus, devid, function;
+	char dirname[PATH_MAX];
+
+	module_idx = ccp_check_pci_uio_module();
+	if (module_idx < 0)
+		return -1;
+
+	TAILQ_INIT(&ccp_list);
+	dir = opendir(SYSFS_PCI_DEVICES);
+	if (dir == NULL)
+		return -1;
+	while ((d = readdir(dir)) != NULL) {
+		if (d->d_name[0] == '.')
+			continue;
+		if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+					&domain, &bus, &devid, &function) != 0)
+			continue;
+		snprintf(dirname, sizeof(dirname), "%s/%s",
+			     SYSFS_PCI_DEVICES, d->d_name);
+		if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+			printf("CCP : Detected CCP device with ID = 0x%x\n",
+			       ccp_id[ccp_type].device_id);
+			ret = ccp_probe_device(dirname, domain, bus, devid,
+					       function, ccp_type);
+			if (ret == 0)
+				dev_cnt++;
+		}
+	}
+	closedir(dir);
+	return dev_cnt;
+}
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 0000000..fe05bf0
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,310 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/**< CCP specific */
+#define MAX_HW_QUEUES                   5
+
+/**< CCP Register Mappings */
+#define Q_MASK_REG                      0x000
+#define TRNG_OUT_REG                    0x00c
+
+/* CCP Version 5 Specifics */
+#define CMD_QUEUE_MASK_OFFSET		0x00
+#define	CMD_QUEUE_PRIO_OFFSET		0x04
+#define CMD_REQID_CONFIG_OFFSET		0x08
+#define	CMD_CMD_TIMEOUT_OFFSET		0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET	0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET	0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET	0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET	0x24
+
+#define CMD_Q_CONTROL_BASE		0x0000
+#define CMD_Q_TAIL_LO_BASE		0x0004
+#define CMD_Q_HEAD_LO_BASE		0x0008
+#define CMD_Q_INT_ENABLE_BASE		0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE	0x0010
+
+#define CMD_Q_STATUS_BASE		0x0100
+#define CMD_Q_INT_STATUS_BASE		0x0104
+
+#define	CMD_CONFIG_0_OFFSET		0x6000
+#define	CMD_TRNG_CTL_OFFSET		0x6008
+#define	CMD_AES_MASK_OFFSET		0x6010
+#define	CMD_CLK_GATE_CTL_OFFSET		0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR		0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN			0x1
+#define CMD_Q_SIZE			0x1F
+#define CMD_Q_SHIFT			3
+#define COMMANDS_PER_QUEUE		2048
+
+#define QUEUE_SIZE_VAL                  ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+					 CMD_Q_SIZE)
+#define Q_DESC_SIZE                     sizeof(struct ccp_desc)
+#define Q_SIZE(n)                       (COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION                  0x1
+#define INT_ERROR                       0x2
+#define INT_QUEUE_STOPPED               0x4
+#define ALL_INTERRUPTS                  (INT_COMPLETION| \
+					 INT_ERROR| \
+					 INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH                5
+#define MAX_LSB_CNT                     8
+
+#define LSB_SIZE                        16
+#define LSB_ITEM_SIZE                   32
+#define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
+
+/* bitmap */
+enum {
+	BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b)  ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+	CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+	(~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+	(~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+				     uint32_t value)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+	ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+	ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+	CCP_VERSION_5A = 0,
+	CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+	struct ccp_device *dev;
+	char memz_name[RTE_MEMZONE_NAMESIZE];
+
+	rte_atomic64_t free_slots;
+	/**< available free slots updated from enq/deq calls */
+
+	/* Queue identifier */
+	uint64_t id;	/**< queue id */
+	uint64_t qidx;	/**< queue index */
+	uint64_t qsize;	/**< queue size */
+
+	/* Queue address */
+	struct ccp_desc *qbase_desc;
+	void *qbase_addr;
+	phys_addr_t qbase_phys_addr;
+	/**< queue-page registers addr */
+	void *reg_base;
+
+	uint32_t qcontrol;
+	/**< queue ctrl reg */
+
+	int lsb;
+	/**< lsb region assigned to queue */
+	unsigned long lsbmask;
+	/**< lsb regions queue can access */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+	/**< all lsb resources which queue is using */
+	uint32_t sb_key;
+	/**< lsb assigned for queue */
+	uint32_t sb_iv;
+	/**< lsb assigned for iv */
+	uint32_t sb_sha;
+	/**< lsb assigned for sha ctx */
+	uint32_t sb_hmac;
+	/**< lsb assigned for hmac ctx */
+} ____cacheline_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+	TAILQ_ENTRY(ccp_device) next;
+	int id;
+	/**< ccp dev id on platform */
+	struct ccp_queue cmd_q[MAX_HW_QUEUES];
+	/**< ccp queue */
+	int cmd_q_count;
+	/**< no. of ccp Queues */
+	struct rte_pci_device pci;
+	/**< ccp pci identifier */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+	/**< shared lsb mask of ccp */
+	rte_spinlock_t lsb_lock;
+	/**< protection for shared lsb region allocation */
+	int qidx;
+	/**< current queue index */
+} __rte_cache_aligned;
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+	uint32_t soc:1;
+	uint32_t ioc:1;
+	uint32_t rsvd1:1;
+	uint32_t init:1;
+	uint32_t eom:1;
+	uint32_t function:15;
+	uint32_t engine:4;
+	uint32_t prot:1;
+	uint32_t rsvd2:7;
+};
+
+struct dword3 {
+	uint32_t src_hi:16;
+	uint32_t src_mem:2;
+	uint32_t lsb_cxt_id:8;
+	uint32_t rsvd1:5;
+	uint32_t fixed:1;
+};
+
+union dword4 {
+	uint32_t dst_lo;	/* NON-SHA */
+	uint32_t sha_len_lo;	/* SHA */
+};
+
+union dword5 {
+	struct {
+		uint32_t dst_hi:16;
+		uint32_t dst_mem:2;
+		uint32_t rsvd1:13;
+		uint32_t fixed:1;
+	}
+	fields;
+	uint32_t sha_len_hi;
+};
+
+struct dword7 {
+	uint32_t key_hi:16;
+	uint32_t key_mem:2;
+	uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+	struct dword0 dw0;
+	uint32_t length;
+	uint32_t src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	uint32_t key_lo;
+	struct dword7 dw7;
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+	return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+	return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 0000000..ddf4b49
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,262 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+	"igb_uio",
+	"uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+	FILE *fp;
+	int i;
+	char buf[BUFSIZ];
+
+	fp = fopen(PROC_MODULES, "r");
+	if (fp == NULL)
+		return -1;
+	for (i = 0; i < (int)(sizeof(uio_module_names) /
+			      sizeof(uio_module_names[0])); i++) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+				     strlen(uio_module_names[i]))) {
+				fclose(fp);
+				return i;
+			}
+		}
+		rewind(fp);
+	}
+	fclose(fp);
+	printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
+	return -1; /* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			  uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+	/* first split on ':' */
+	union splitaddr {
+		struct {
+			char *domain;
+			char *bus;
+			char *devid;
+			char *function;
+		};
+		char *str[PCI_FMT_NVAL];
+		/* last element-separator is "." not ":" */
+	} splitaddr;
+
+	char *buf_copy = strndup(buf, bufsize);
+
+	if (buf_copy == NULL)
+		return -1;
+
+	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+			!= PCI_FMT_NVAL - 1)
+		goto error;
+	/* final split is on '.' between devid and function */
+	splitaddr.function = strchr(splitaddr.devid, '.');
+	if (splitaddr.function == NULL)
+		goto error;
+	*splitaddr.function++ = '\0';
+
+	/* now convert to int values */
+	errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+	*bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+	*devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+	*function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+	if (errno != 0)
+		goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+	return 0;
+error:
+	free(buf_copy);
+	return -1;
+}
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+	FILE *f;
+	char buf[BUFSIZ];
+	char *end = NULL;
+
+	f = fopen(filename, "r");
+	if (f == NULL)
+		return -1;
+	if (fgets(buf, sizeof(buf), f) == NULL) {
+		fclose(f);
+		return -1;
+	}
+	*val = strtoul(buf, &end, 0);
+	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+		fclose(f);
+		return -1;
+	}
+	fclose(f);
+	return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO         0x00000100
+#define IORESOURCE_MEM        0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+				 uint64_t *end_addr, uint64_t *flags)
+{
+	union pci_resource_info {
+		struct {
+			char *phys_addr;
+			char *end_addr;
+			char *flags;
+		};
+		char *ptrs[PCI_RESOURCE_FMT_NVAL];
+	} res_info;
+
+	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+		return -1;
+	errno = 0;
+	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+	*end_addr = strtoull(res_info.end_addr, NULL, 16);
+	*flags = strtoull(res_info.flags, NULL, 16);
+	if (errno != 0)
+		return -1;
+
+	return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+	FILE *fp;
+	char buf[BUFSIZ];
+	int i;
+	uint64_t phys_addr, end_addr, flags;
+
+	fp = fopen(filename, "r");
+	if (fp == NULL)
+		return -1;
+
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+		if (fgets(buf, sizeof(buf), fp) == NULL)
+			goto error;
+		if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+				&phys_addr, &end_addr, &flags) < 0)
+			goto error;
+
+		if (flags & IORESOURCE_MEM) {
+			dev->mem_resource[i].phys_addr = phys_addr;
+			dev->mem_resource[i].len = end_addr - phys_addr + 1;
+			/* not mapped for now */
+			dev->mem_resource[i].addr = NULL;
+		}
+	}
+	fclose(fp);
+	return 0;
+
+error:
+	fclose(fp);
+	return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+	DIR *dir;
+	struct dirent *e;
+	char dirname_uio[PATH_MAX];
+	unsigned int uio_num;
+	int ret = -1;
+
+	/* depending on kernel version, uio can be located in uio/uioX
+	 * or uio:uioX
+	 */
+	snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+	dir = opendir(dirname_uio);
+	if (dir == NULL) {
+		/* retry with parent directory; layout varies with kernel version */
+		dir = opendir(dirname);
+		if (dir == NULL)
+			return -1;
+	}
+
+	/* take the first file starting with "uio" */
+	while ((e = readdir(dir)) != NULL) {
+		/* format could be uio%d ...*/
+		int shortprefix_len = sizeof("uio") - 1;
+		/* ... or uio:uio%d */
+		int longprefix_len = sizeof("uio:uio") - 1;
+		char *endptr;
+
+		if (strncmp(e->d_name, "uio", 3) != 0)
+			continue;
+
+		/* first try uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+
+		/* then try uio:uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+	}
+	closedir(dir);
+	return ret;
+}
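
As a usage sketch (not part of the patch), parsing a sysfs entry name such
as "0000:03:00.2" with the helper above yields domain 0, bus 3, device 0,
function 2; the BDF string here is hypothetical:

    uint16_t domain;
    uint8_t bus, devid, function;
    const char *name = "0000:03:00.2";

    if (ccp_parse_pci_addr_format(name, sizeof("0000:03:00.2"),
                                  &domain, &bus, &devid, &function) < 0)
            printf("invalid PCI address %s\n", name);
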
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 0000000..a4c09c8
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,53 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			      uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+				 struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+#endif /* _CCP_PCI_H_ */
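
For reference, ccp_pci_parse_sysfs_resource() declared above consumes the
sysfs "resource" file, one line per BAR with three hexadecimal fields; the
values below are illustrative:

    /*
     * /sys/bus/pci/devices/0000:03:00.2/resource, one BAR per line:
     *   0x00000000f0000000 0x00000000f0001fff 0x0000000000040200
     *   phys_addr          end_addr           flags (IORESOURCE_MEM bit set)
     * len is then computed as end_addr - phys_addr + 1.
     */
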
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 0000000..bc4120b
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,55 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+struct rte_cryptodev_ops ccp_ops = {
+		.dev_configure		= NULL,
+		.dev_start		= NULL,
+		.dev_stop		= NULL,
+		.dev_close		= NULL,
+
+		.stats_get		= NULL,
+		.stats_reset		= NULL,
+
+		.dev_infos_get		= NULL,
+
+		.queue_pair_setup	= NULL,
+		.queue_pair_release	= NULL,
+		.queue_pair_start	= NULL,
+		.queue_pair_stop	= NULL,
+		.queue_pair_count	= NULL,
+
+		.session_get_size	= NULL,
+		.session_configure	= NULL,
+		.session_clear		= NULL,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 0000000..f5b6061
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,82 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev.h>
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/**< Maximum queue pairs supported by CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS	1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
+	unsigned int max_nb_sessions;	/**< Max number of sessions */
+	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+};
+
+/**< device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 6fa14bd..cc35a97 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -28,23 +28,170 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <rte_bus_pci.h>
 #include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
 
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global flag indicating whether the CCP PMD has already been initialized.
+ */
+static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
+		      struct rte_crypto_op **ops __rte_unused,
+		      uint16_t nb_ops __rte_unused)
+{
+	uint16_t enq_cnt = 0;
+
+	return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
+		      struct rte_crypto_op **ops __rte_unused,
+		      uint16_t nb_ops __rte_unused)
+{
+	uint16_t nb_dequeued = 0;
+
+	return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+	},
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+	},
+	{.device_id = 0},
+};
+
 /** Remove ccp pmd */
 static int
-cryptodev_ccp_remove(struct rte_vdev_device *dev __rte_unused)
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
 {
+	const char *name;
+
+	ccp_pmd_init_done = 0;
+	name = rte_vdev_device_name(dev);
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+			name, rte_socket_id());
+
 	return 0;
 }
 
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+		     struct rte_vdev_device *vdev,
+		     struct rte_cryptodev_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct ccp_private *internals;
+	uint8_t cryptodev_cnt = 0;
+
+	if (init_params->name[0] == '\0')
+		snprintf(init_params->name, sizeof(init_params->name),
+				"%s", name);
+
+	dev = rte_cryptodev_pmd_create(init_params->name,
+				       &vdev->device,
+				       init_params);
+	if (dev == NULL) {
+		CCP_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+	if (cryptodev_cnt == 0) {
+		CCP_LOG_ERR("failed to detect CCP crypto device");
+		goto init_error;
+	}
+
+	printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+	dev->driver_id = ccp_cryptodev_driver_id;
+
+	/* register rx/tx burst functions for data path */
+	dev->dev_ops = ccp_pmd_ops;
+	dev->enqueue_burst = ccp_pmd_enqueue_burst;
+	dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+	internals = dev->data->dev_private;
+
+	internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+	internals->crypto_num_dev = cryptodev_cnt;
+
+	return 0;
+
+init_error:
+	CCP_LOG_ERR("driver %s: %s() failed",
+		    init_params->name, __func__);
+	cryptodev_ccp_remove(vdev);
+
+	return -EFAULT;
+}
+
 /** Probe ccp pmd */
 static int
-cryptodev_ccp_probe(struct rte_vdev_device *vdev __rte_unused)
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
 {
+	int rc = 0;
+	const char *name;
+	struct rte_cryptodev_pmd_init_params init_params = {
+		"",
+		sizeof(struct ccp_private),
+		rte_socket_id(),
+		CCP_PMD_MAX_QUEUE_PAIRS,
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+	};
+	const char *input_args;
+
+	if (ccp_pmd_init_done) {
+		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+		return -EFAULT;
+	}
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	input_args = rte_vdev_device_args(vdev);
+	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+	init_params.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	rc = cryptodev_ccp_create(name, vdev, &init_params);
+	if (rc)
+		return rc;
+	ccp_pmd_init_done = 1;
 	return 0;
 }
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
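
With this skeleton applied, the PMD can be brought up either from the EAL
command line (--vdev="crypto_ccp") or programmatically. A hedged sketch
using the standard vdev API; the device argument shown is illustrative and
is in any case capped to CCP_PMD_MAX_QUEUE_PAIRS by the probe function:

    #include <rte_bus_vdev.h>

    /* triggers cryptodev_ccp_probe(), which scans for CCP-5a/5b
     * engines via ccp_probe_devices()
     */
    if (rte_vdev_init("crypto_ccp", "max_nb_queue_pairs=1") < 0)
            printf("cannot create crypto_ccp vdev\n");
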

* [PATCH v3 03/19] crypto/ccp: support basic pmd ops
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 02/19] crypto/ccp: support ccp device initialization and deinitialization Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 04/19] crypto/ccp: support session related crypto " Ravi Kumar
                       ` (17 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_dev.c         |  9 ++++++
 drivers/crypto/ccp/ccp_dev.h         |  9 ++++++
 drivers/crypto/ccp/ccp_pmd_ops.c     | 61 +++++++++++++++++++++++++++++++++---
 drivers/crypto/ccp/ccp_pmd_private.h | 43 +++++++++++++++++++++++++
 4 files changed, 117 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 5af2b49..57bccf4 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -52,6 +52,15 @@
 struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
 static int ccp_dev_id;
 
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+	struct ccp_private *priv = dev->data->dev_private;
+
+	priv->last_dev = TAILQ_FIRST(&ccp_list);
+	return 0;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index fe05bf0..b321530 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -104,6 +104,10 @@
 #define LSB_ITEM_SIZE                   32
 #define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
 
+/* General CCP Defines */
+
+#define CCP_SB_BYTES                    32
+
 /* bitmap */
 enum {
 	BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
@@ -299,6 +303,11 @@ high32_value(unsigned long addr)
 	return ((uint64_t)addr >> 32) & 0x00000ffff;
 }
 
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
 /**
  * Detect ccp platform and initialize all ccp devices
  *
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index bc4120b..b6f8c48 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -28,18 +28,69 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <string.h>
+
+#include <rte_common.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+
+static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+	       struct rte_cryptodev_config *config __rte_unused)
+{
+	return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+	return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+	return 0;
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+		 struct rte_cryptodev_info *dev_info)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = ccp_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
 
 struct rte_cryptodev_ops ccp_ops = {
-		.dev_configure		= NULL,
-		.dev_start		= NULL,
-		.dev_stop		= NULL,
-		.dev_close		= NULL,
+		.dev_configure		= ccp_pmd_config,
+		.dev_start		= ccp_pmd_start,
+		.dev_stop		= ccp_pmd_stop,
+		.dev_close		= ccp_pmd_close,
 
 		.stats_get		= NULL,
 		.stats_reset		= NULL,
 
-		.dev_infos_get		= NULL,
+		.dev_infos_get		= ccp_pmd_info_get,
 
 		.queue_pair_setup	= NULL,
 		.queue_pair_release	= NULL,
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
index f5b6061..d2283e8 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -60,13 +60,56 @@
 #define CCP_NB_MAX_DESCRIPTORS 1024
 #define CCP_MAX_BURST 64
 
+#include "ccp_dev.h"
+
 /* private data structure for each CCP crypto device */
 struct ccp_private {
 	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
 	unsigned int max_nb_sessions;	/**< Max number of sessions */
 	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+	struct ccp_device *last_dev;	/**< Last working crypto device */
 };
 
+/* CCP batch info */
+struct ccp_batch_info {
+	struct rte_crypto_op *op[CCP_MAX_BURST];
+	/**< op table populated at enqueue time from app */
+	int op_idx;
+	struct ccp_queue *cmd_q;
+	uint16_t opcnt;
+	/**< no. of crypto ops in batch */
+	int desccnt;
+	/**< no. of ccp queue descriptors */
+	uint32_t head_offset;
+	/**< ccp queue head/tail offsets at enqueue time */
+	uint32_t tail_offset;
+	uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+	phys_addr_t lsb_buf_phys;
+	/**< LSB intermediate buf for passthru */
+	int lsb_buf_idx;
+} __rte_cache_aligned;
+
+/**< CCP crypto queue pair */
+struct ccp_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing process packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_mempool *batch_mp;
+	/**< Session Mempool for batch info */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+	struct ccp_batch_info *b_info;
+	/**< Store ops pulled out of queue */
+	struct rte_cryptodev *dev;
+	/**< rte crypto device to which this qp belongs */
+} __rte_cache_aligned;
+
+
 /**< device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *ccp_pmd_ops;
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
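
Once dev_infos_get is wired up, applications can query the device through
the normal API; a small sketch, with dev_id 0 assumed:

    struct rte_cryptodev_info info;

    rte_cryptodev_info_get(0, &info);
    /* reports the values set in ccp_pmd_info_get() */
    printf("qpairs %u, sessions %u\n",
           info.max_nb_queue_pairs, info.sym.max_nb_sessions);
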

* [PATCH v3 04/19] crypto/ccp: support session related crypto pmd ops
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 02/19] crypto/ccp: support ccp device initialization and deinitialization Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 03/19] crypto/ccp: support basic pmd ops Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 05/19] crypto/ccp: support queue pair related " Ravi Kumar
                       ` (16 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/Makefile      |   3 +-
 drivers/crypto/ccp/ccp_crypto.c  | 229 +++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  | 267 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h     | 129 +++++++++++++++++++
 drivers/crypto/ccp/ccp_pmd_ops.c |  61 ++++++++-
 5 files changed, 685 insertions(+), 4 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp_crypto.c
 create mode 100644 drivers/crypto/ccp/ccp_crypto.h

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 5e58c31..5241465 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -51,8 +51,9 @@ EXPORT_MAP := rte_pmd_ccp_version.map
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 0000000..c365c0f
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,229 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+	if (xform == NULL)
+		return res;
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next == NULL)
+			return CCP_CMD_AUTH;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return CCP_CMD_HASH_CIPHER;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next == NULL)
+			return CCP_CMD_CIPHER;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return CCP_CMD_CIPHER_HASH;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+		return CCP_CMD_COMBINED;
+	return res;
+}
+
+/* configure session */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+			     const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+	cipher_xform = &xform->cipher;
+
+	/* set cipher direction */
+	if (cipher_xform->op ==  RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+	else
+		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+	/* set cipher key */
+	sess->cipher.key_length = cipher_xform->key.length;
+	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+		   cipher_xform->key.length);
+
+	/* set iv parameters */
+	sess->iv.offset = cipher_xform->iv.offset;
+	sess->iv.length = cipher_xform->iv.length;
+
+	switch (cipher_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo");
+		return -1;
+	}
+
+
+	switch (sess->cipher.engine) {
+	default:
+		CCP_LOG_ERR("Invalid CCP Engine");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_auth_xform *auth_xform = NULL;
+
+	auth_xform = &xform->auth;
+
+	sess->auth.digest_length = auth_xform->digest_length;
+	if (auth_xform->op ==  RTE_CRYPTO_AUTH_OP_GENERATE)
+		sess->auth.op = CCP_AUTH_OP_GENERATE;
+	else
+		sess->auth.op = CCP_AUTH_OP_VERIFY;
+	switch (auth_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported hash algo");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_aead_xform *aead_xform = NULL;
+
+	aead_xform = &xform->aead;
+
+	sess->cipher.key_length = aead_xform->key.length;
+	rte_memcpy(sess->cipher.key, aead_xform->key.data,
+		   aead_xform->key.length);
+
+	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+		sess->auth.op = CCP_AUTH_OP_GENERATE;
+	} else {
+		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+		sess->auth.op = CCP_AUTH_OP_VERIFY;
+	}
+	sess->auth.aad_length = aead_xform->aad_length;
+	sess->auth.digest_length = aead_xform->digest_length;
+
+	/* set iv parameters */
+	sess->iv.offset = aead_xform->iv.offset;
+	sess->iv.length = aead_xform->iv.length;
+
+	switch (aead_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *aead_xform = NULL;
+	int ret = 0;
+
+	sess->cmd_id = ccp_get_cmd_id(xform);
+
+	switch (sess->cmd_id) {
+	case CCP_CMD_CIPHER:
+		cipher_xform = xform;
+		break;
+	case CCP_CMD_AUTH:
+		auth_xform = xform;
+		break;
+	case CCP_CMD_CIPHER_HASH:
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case CCP_CMD_HASH_CIPHER:
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	case CCP_CMD_COMBINED:
+		aead_xform = xform;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+		return -1;
+	}
+
+	/* Default IV length = 0 */
+	sess->iv.length = 0;
+	if (cipher_xform) {
+		ret = ccp_configure_session_cipher(sess, cipher_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+			return ret;
+		}
+	}
+	if (auth_xform) {
+		ret = ccp_configure_session_auth(sess, auth_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported auth parameters");
+			return ret;
+		}
+	}
+	if (aead_xform) {
+		ret = ccp_configure_session_aead(sess, aead_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported aead parameters");
+			return ret;
+		}
+	}
+	return ret;
+}
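
For illustration, ccp_get_cmd_id() classifies an xform chain purely by its
type and order: a cipher transform whose next pointer is an auth transform
selects CCP_CMD_CIPHER_HASH. A minimal sketch, with the algorithm fields
left out because no algorithms are enabled until later patches:

    struct rte_crypto_sym_xform auth_xform = {
            .type = RTE_CRYPTO_SYM_XFORM_AUTH,
            .next = NULL,
    };
    struct rte_crypto_sym_xform cipher_xform = {
            .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
            .next = &auth_xform,
    };
    /* ccp_get_cmd_id(&cipher_xform) == CCP_CMD_CIPHER_HASH */
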
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 0000000..346d5ee
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,267 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include "ccp_dev.h"
+
+#define CCP_SHA3_CTX_SIZE 200
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+	CCP_AES_MODE_ECB = 0,
+	CCP_AES_MODE_CBC,
+	CCP_AES_MODE_OFB,
+	CCP_AES_MODE_CFB,
+	CCP_AES_MODE_CTR,
+	CCP_AES_MODE_CMAC,
+	CCP_AES_MODE_GHASH,
+	CCP_AES_MODE_GCTR,
+	CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+	CCP_AES_MODE_GHASH_AAD = 0,
+	CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+	CCP_AES_TYPE_128 = 0,
+	CCP_AES_TYPE_192,
+	CCP_AES_TYPE_256,
+	CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+	CCP_DES_MODE_ECB = 0, /* Not supported */
+	CCP_DES_MODE_CBC,
+	CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+	CCP_DES_TYPE_128 = 0,	/* 112 + 16 parity */
+	CCP_DES_TYPE_192,	/* 168 + 24 parity */
+	CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ * @CCP_SHA_TYPE_384: SHA-384 operation
+ * @CCP_SHA_TYPE_512: SHA-512 operation
+ */
+enum ccp_sha_type {
+	CCP_SHA_TYPE_1 = 1,
+	CCP_SHA_TYPE_224,
+	CCP_SHA_TYPE_256,
+	CCP_SHA_TYPE_384,
+	CCP_SHA_TYPE_512,
+	CCP_SHA_TYPE_RSVD1,
+	CCP_SHA_TYPE_RSVD2,
+	CCP_SHA3_TYPE_224,
+	CCP_SHA3_TYPE_256,
+	CCP_SHA3_TYPE_384,
+	CCP_SHA3_TYPE_512,
+	CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+	CCP_CIPHER_ALGO_AES_CBC = 0,
+	CCP_CIPHER_ALGO_AES_ECB,
+	CCP_CIPHER_ALGO_AES_CTR,
+	CCP_CIPHER_ALGO_AES_GCM,
+	CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+	CCP_CIPHER_DIR_DECRYPT = 0,
+	CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+	CCP_AUTH_ALGO_SHA1 = 0,
+	CCP_AUTH_ALGO_SHA1_HMAC,
+	CCP_AUTH_ALGO_SHA224,
+	CCP_AUTH_ALGO_SHA224_HMAC,
+	CCP_AUTH_ALGO_SHA3_224,
+	CCP_AUTH_ALGO_SHA3_224_HMAC,
+	CCP_AUTH_ALGO_SHA256,
+	CCP_AUTH_ALGO_SHA256_HMAC,
+	CCP_AUTH_ALGO_SHA3_256,
+	CCP_AUTH_ALGO_SHA3_256_HMAC,
+	CCP_AUTH_ALGO_SHA384,
+	CCP_AUTH_ALGO_SHA384_HMAC,
+	CCP_AUTH_ALGO_SHA3_384,
+	CCP_AUTH_ALGO_SHA3_384_HMAC,
+	CCP_AUTH_ALGO_SHA512,
+	CCP_AUTH_ALGO_SHA512_HMAC,
+	CCP_AUTH_ALGO_SHA3_512,
+	CCP_AUTH_ALGO_SHA3_512_HMAC,
+	CCP_AUTH_ALGO_AES_CMAC,
+	CCP_AUTH_ALGO_AES_GCM,
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	CCP_AUTH_ALGO_MD5_HMAC,
+#endif
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+	CCP_AUTH_OP_GENERATE = 0,
+	CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+	enum ccp_cmd_order cmd_id;
+	/**< chain order mode */
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	/**< IV parameters */
+	struct {
+		enum ccp_cipher_algo  algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+			enum ccp_des_mode des_mode;
+		} um;
+		union {
+			enum ccp_aes_type aes_type;
+			enum ccp_des_type des_type;
+		} ut;
+		enum ccp_cipher_dir dir;
+		uint64_t key_length;
+		/**< max cipher key size 256 bits */
+		uint8_t key[32];
+	/**< key in CCP format */
+	uint8_t key_ccp[32];
+	phys_addr_t key_phys;
+	/**< AES-CTR nonce(4) + iv(8) + ctr */
+		uint8_t nonce[32];
+		phys_addr_t nonce_phys;
+	} cipher;
+	/**< Cipher Parameters */
+
+	struct {
+		enum ccp_hash_algo algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+		} um;
+		union {
+			enum ccp_sha_type sha_type;
+			enum ccp_aes_type aes_type;
+		} ut;
+		enum ccp_hash_op op;
+		uint64_t key_length;
+	/**< max hash key size 144 bytes (per device capabilities) */
+	uint8_t key[144];
+	/**< max big-endian AES key size is 32 bytes */
+		uint8_t key_ccp[32];
+		phys_addr_t key_phys;
+		uint64_t digest_length;
+		void *ctx;
+		int ctx_len;
+		int offset;
+		int block_size;
+	/**< Buffer to store software-generated precomputed values: */
+	/**< for HMAC, H(ipad ^ key) and H(opad ^ key); */
+	/**< for CMAC, K1 IV and K2 IV */
+	uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+	/**< SHA3 initial ctx, all zeros */
+		uint8_t sha3_ctx[200];
+		int aad_length;
+	} auth;
+	/**< Authentication Parameters */
+	enum rte_crypto_aead_algorithm aead_algo;
+	/**< AEAD Algorithm */
+
+	uint32_t reserved;
+} __rte_cache_aligned;
+
+extern uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_qp;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @return 0 on success, negative value otherwise
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+			       const struct rte_crypto_sym_xform *xform);
+
+#endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index b321530..a16ba81 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -225,6 +225,123 @@ struct ccp_device {
 	/**< current queue index */
 } __rte_cache_aligned;
 
+/**< CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+	CCP_ENGINE_AES = 0,
+	CCP_ENGINE_XTS_AES_128,
+	CCP_ENGINE_3DES,
+	CCP_ENGINE_SHA,
+	CCP_ENGINE_RSA,
+	CCP_ENGINE_PASSTHRU,
+	CCP_ENGINE_ZLIB_DECOMPRESS,
+	CCP_ENGINE_ECC,
+	CCP_ENGINE__LAST,
+};
+
+/* Passthru engine */
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+	CCP_PASSTHRU_BITWISE_NOOP = 0,
+	CCP_PASSTHRU_BITWISE_AND,
+	CCP_PASSTHRU_BITWISE_OR,
+	CCP_PASSTHRU_BITWISE_XOR,
+	CCP_PASSTHRU_BITWISE_MASK,
+	CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+	CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+	CCP_PASSTHRU_BYTESWAP_32BIT,
+	CCP_PASSTHRU_BYTESWAP_256BIT,
+	CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+	phys_addr_t src_addr;
+	phys_addr_t dest_addr;
+	enum ccp_passthru_bitwise bit_mod;
+	enum ccp_passthru_byteswap byte_swap;
+	int len;
+	int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} aes;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} des;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t rsvd:5;
+		uint16_t type:2;
+	} aes_xts;
+	struct {
+		uint16_t rsvd1:10;
+		uint16_t type:4;
+		uint16_t rsvd2:1;
+	} sha;
+	struct {
+		uint16_t mode:3;
+		uint16_t size:12;
+	} rsa;
+	struct {
+		uint16_t byteswap:2;
+		uint16_t bitwise:3;
+		uint16_t reflect:2;
+		uint16_t rsvd:8;
+	} pt;
+	struct {
+		uint16_t rsvd:13;
+	} zlib;
+	struct {
+		uint16_t size:10;
+		uint16_t type:2;
+		uint16_t mode:3;
+	} ecc;
+	uint16_t raw;
+};
+
 /**
  * descriptor for version 5 CCP commands
  * 8 32-bit words:
@@ -291,6 +408,18 @@ struct ccp_desc {
 	struct dword7 dw7;
 };
 
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+	CCP_CMD_CIPHER = 0,
+	CCP_CMD_AUTH,
+	CCP_CMD_CIPHER_HASH,
+	CCP_CMD_HASH_CIPHER,
+	CCP_CMD_COMBINED,
+	CCP_CMD_NOT_SUPPORTED,
+};
+
 static inline uint32_t
 low32_value(unsigned long addr)
 {
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index b6f8c48..ad0a670 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -36,6 +36,7 @@
 
 #include "ccp_pmd_private.h"
 #include "ccp_dev.h"
+#include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
@@ -81,6 +82,60 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
 	}
 }
 
+static unsigned
+ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct ccp_session);
+}
+
+static int
+ccp_pmd_session_configure(struct rte_cryptodev *dev,
+			  struct rte_crypto_sym_xform *xform,
+			  struct rte_cryptodev_sym_session *sess,
+			  struct rte_mempool *mempool)
+{
+	int ret;
+	void *sess_private_data;
+
+	if (unlikely(sess == NULL || xform == NULL)) {
+		CCP_LOG_ERR("Invalid session struct or xform");
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CCP_LOG_ERR("Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+	ret = ccp_set_session_parameters(sess_private_data, xform);
+	if (ret != 0) {
+		CCP_LOG_ERR("failed configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+	set_session_private_data(sess, dev->driver_id,
+				 sess_private_data);
+
+	return 0;
+}
+
+static void
+ccp_pmd_session_clear(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_session_private_data(sess, index);
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		rte_mempool_put(sess_mp, sess_priv);
+		memset(sess_priv, 0, sizeof(struct ccp_session));
+		set_session_private_data(sess, index, NULL);
+	}
+}
+
 struct rte_cryptodev_ops ccp_ops = {
 		.dev_configure		= ccp_pmd_config,
 		.dev_start		= ccp_pmd_start,
@@ -98,9 +153,9 @@ struct rte_cryptodev_ops ccp_ops = {
 		.queue_pair_stop	= NULL,
 		.queue_pair_count	= NULL,
 
-		.session_get_size	= NULL,
-		.session_configure	= NULL,
-		.session_clear		= NULL,
+		.session_get_size	= ccp_pmd_session_get_size,
+		.session_configure	= ccp_pmd_session_configure,
+		.session_clear		= ccp_pmd_session_clear,
 };
 
 struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
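
With these ops in place the standard session flow applies; a hedged sketch
against the session API of this DPDK generation, where dev_id, sess_mp and
cipher_xform are illustrative:

    struct rte_cryptodev_sym_session *sess;

    sess = rte_cryptodev_sym_session_create(sess_mp);
    if (sess == NULL)
            return -ENOMEM;
    /* invokes ccp_pmd_session_configure() through the ops table */
    if (rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
                                       sess_mp) != 0)
            return -EINVAL;
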

* [PATCH v3 05/19] crypto/ccp: support queue pair related pmd ops
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (2 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 04/19] crypto/ccp: support session related crypto " Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 06/19] crypto/ccp: support crypto enqueue and dequeue burst api Ravi Kumar
                       ` (15 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_pmd_ops.c | 149 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 144 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index ad0a670..a02aa6f 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -82,6 +82,145 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
 	}
 }
 
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct ccp_qp *qp;
+
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+		rte_ring_free(qp->processed_pkts);
+		rte_mempool_free(qp->batch_mp);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct ccp_qp *qp)
+{
+	unsigned int n = snprintf(qp->name, sizeof(qp->name),
+			"ccp_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+				  unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->size >= ring_size) {
+			CCP_LOG_INFO(
+				"Reusing ring %s for processed packets",
+				qp->name);
+			return r;
+		}
+		CCP_LOG_INFO(
+			"Unable to reuse ring %s for processed packets",
+			 qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		 const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id, struct rte_mempool *session_pool)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+	struct ccp_qp *qp;
+	int retval = 0;
+
+	if (qp_id >= internals->max_nb_qpairs) {
+		CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+			    qp_id, internals->max_nb_qpairs);
+		return (-EINVAL);
+	}
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		ccp_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL) {
+		CCP_LOG_ERR("Failed to allocate queue pair memory");
+		return (-ENOMEM);
+	}
+
+	qp->dev = dev;
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	retval = ccp_pmd_qp_set_unique_name(dev, qp);
+	if (retval) {
+		CCP_LOG_ERR("Failed to create unique name for ccp qp");
+		goto qp_setup_cleanup;
+	}
+
+	qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL) {
+		CCP_LOG_ERR("Failed to create batch info ring");
+		goto qp_setup_cleanup;
+	}
+
+	qp->sess_mp = session_pool;
+
+	/* mempool for batch info */
+	qp->batch_mp = rte_mempool_create(
+				qp->name,
+				qp_conf->nb_descriptors,
+				sizeof(struct ccp_batch_info),
+				RTE_CACHE_LINE_SIZE,
+				0, NULL, NULL, NULL, NULL,
+				SOCKET_ID_ANY, 0);
+	if (qp->batch_mp == NULL)
+		goto qp_setup_cleanup;
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	return 0;
+
+qp_setup_cleanup:
+	dev->data->queue_pairs[qp_id] = NULL;
+	if (qp)
+		rte_free(qp);
+	return -1;
+}
+
+static int
+ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
+		 uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static int
+ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
+		uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
 static unsigned
 ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
 {
@@ -147,11 +286,11 @@ struct rte_cryptodev_ops ccp_ops = {
 
 		.dev_infos_get		= ccp_pmd_info_get,
 
-		.queue_pair_setup	= NULL,
-		.queue_pair_release	= NULL,
-		.queue_pair_start	= NULL,
-		.queue_pair_stop	= NULL,
-		.queue_pair_count	= NULL,
+		.queue_pair_setup	= ccp_pmd_qp_setup,
+		.queue_pair_release	= ccp_pmd_qp_release,
+		.queue_pair_start	= ccp_pmd_qp_start,
+		.queue_pair_stop	= ccp_pmd_qp_stop,
+		.queue_pair_count	= ccp_pmd_qp_count,
 
 		.session_get_size	= ccp_pmd_session_get_size,
 		.session_configure	= ccp_pmd_session_configure,
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
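
A usage sketch for the queue-pair path added here (values illustrative;
qp_id must be below max_nb_qpairs, which this PMD sets to 1):

    struct rte_cryptodev_qp_conf qp_conf = {
            .nb_descriptors = CCP_NB_MAX_DESCRIPTORS,
    };

    /* reaches ccp_pmd_qp_setup(), which also allocates the batch-info
     * mempool and the processed-packet ring named ccp_pmd_<dev>_qp_<id>
     */
    if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
                                       rte_socket_id(), sess_mp) < 0)
            return -EINVAL;
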

* [PATCH v3 06/19] crypto/ccp: support crypto enqueue and dequeue burst api
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (3 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 05/19] crypto/ccp: support queue pair related " Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 07/19] crypto/ccp: support for RTE_CRYPTO_OP_SESSIONLESS Ravi Kumar
                       ` (14 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 360 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  35 ++++
 drivers/crypto/ccp/ccp_dev.c     |  27 +++
 drivers/crypto/ccp/ccp_dev.h     |   9 +
 drivers/crypto/ccp/rte_ccp_pmd.c |  64 ++++++-
 5 files changed, 488 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index c365c0f..c17e84f 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -227,3 +227,363 @@ ccp_set_session_parameters(struct ccp_session *sess,
 	}
 	return ret;
 }
+
+/* calculate the number of CCP descriptors required */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cipher.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo %d",
+			    session->cipher.algo);
+	}
+	return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->auth.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported auth algo %d",
+			    session->auth.algo);
+	}
+
+	return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->aead_algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo %d",
+			    session->aead_algo);
+	}
+	return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cmd_id) {
+	case CCP_CMD_CIPHER:
+		count = ccp_cipher_slot(session);
+		break;
+	case CCP_CMD_AUTH:
+		count = ccp_auth_slot(session);
+		break;
+	case CCP_CMD_CIPHER_HASH:
+	case CCP_CMD_HASH_CIPHER:
+		count = ccp_cipher_slot(session);
+		count += ccp_auth_slot(session);
+		break;
+	case CCP_CMD_COMBINED:
+		count = ccp_aead_slot(session);
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+
+	}
+
+	return count;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+		  struct ccp_queue *cmd_q __rte_unused,
+		  struct ccp_batch_info *b_info __rte_unused)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+
+	switch (session->cipher.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo %d",
+			    session->cipher.algo);
+		return -ENOTSUP;
+	}
+	return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q __rte_unused,
+		struct ccp_batch_info *b_info __rte_unused)
+{
+
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	switch (session->auth.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported auth algo %d",
+			    session->auth.algo);
+		return -ENOTSUP;
+	}
+
+	return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q __rte_unused,
+		struct ccp_batch_info *b_info __rte_unused)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	switch (session->aead_algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo %d",
+			    session->aead_algo);
+		return -ENOTSUP;
+	}
+	return result;
+}
+
+int
+process_ops_to_enqueue(const struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       struct ccp_queue *cmd_q,
+		       uint16_t nb_ops,
+		       int slots_req)
+{
+	int i, result = 0;
+	struct ccp_batch_info *b_info;
+	struct ccp_session *session;
+
+	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+		CCP_LOG_ERR("batch info allocation failed");
+		return 0;
+	}
+	/* populate batch info necessary for dequeue */
+	b_info->op_idx = 0;
+	b_info->lsb_buf_idx = 0;
+	b_info->desccnt = 0;
+	b_info->cmd_q = cmd_q;
+	b_info->lsb_buf_phys =
+		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+	for (i = 0; i < nb_ops; i++) {
+		session = (struct ccp_session *)get_session_private_data(
+						 op[i]->sym->session,
+						 ccp_cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_AUTH:
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_CIPHER_HASH:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			if (result)
+				break;
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_HASH_CIPHER:
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			if (result)
+				break;
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_COMBINED:
+			result = ccp_crypto_aead(op[i], cmd_q, b_info);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+			result = -1;
+		}
+		if (unlikely(result < 0)) {
+			rte_atomic64_add(&b_info->cmd_q->free_slots,
+					 (slots_req - b_info->desccnt));
+			break;
+		}
+		b_info->op[i] = op[i];
+	}
+
+	b_info->opcnt = i;
+	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+
+	rte_wmb();
+	/* Write the new tail address back to the queue register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      b_info->tail_offset);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+
+	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+	return i;
+}
+
+static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+	struct ccp_session *session;
+	uint8_t *digest_data, *addr;
+	struct rte_mbuf *m_last;
+	int offset, digest_offset;
+	uint8_t digest_le[64];
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	if (session->cmd_id == CCP_CMD_COMBINED) {
+		digest_data = op->sym->aead.digest.data;
+		digest_offset = op->sym->aead.data.offset +
+					op->sym->aead.data.length;
+	} else {
+		digest_data = op->sym->auth.digest.data;
+		digest_offset = op->sym->auth.data.offset +
+					op->sym->auth.data.length;
+	}
+	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+			   m_last->data_len - session->auth.ctx_len);
+
+	rte_mb();
+	offset = session->auth.offset;
+
+	if (session->auth.engine == CCP_ENGINE_SHA)
+		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+			/* All other algorithms require byte
+			 * swap done by host
+			 */
+			unsigned int i;
+
+			offset = session->auth.ctx_len -
+				session->auth.offset - 1;
+			for (i = 0; i < session->auth.digest_length; i++)
+				digest_le[i] = addr[offset - i];
+			offset = 0;
+			addr = digest_le;
+		}
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(addr + offset, digest_data,
+			   session->auth.digest_length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+	} else {
+		if (unlikely(digest_data == NULL))
+			digest_data = rte_pktmbuf_mtod_offset(
+					op->sym->m_dst, uint8_t *,
+					digest_offset);
+		rte_memcpy(digest_data, addr + offset,
+			   session->auth.digest_length);
+	}
+	/* Trim area used for digest from mbuf. */
+	rte_pktmbuf_trim(op->sym->m_src,
+			 session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct rte_crypto_op **op_d,
+		struct ccp_batch_info *b_info,
+		uint16_t nb_ops)
+{
+	int i, min_ops;
+	struct ccp_session *session;
+
+	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+	for (i = 0; i < min_ops; i++) {
+		op_d[i] = b_info->op[b_info->op_idx++];
+		session = (struct ccp_session *)get_session_private_data(
+						 op_d[i]->sym->session,
+						ccp_cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			break;
+		case CCP_CMD_AUTH:
+		case CCP_CMD_CIPHER_HASH:
+		case CCP_CMD_HASH_CIPHER:
+		case CCP_CMD_COMBINED:
+			ccp_auth_dq_prepare(op_d[i]);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+		}
+	}
+
+	b_info->opcnt -= min_ops;
+	return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       uint16_t nb_ops)
+{
+	struct ccp_batch_info *b_info;
+	uint32_t cur_head_offset;
+
+	if (qp->b_info != NULL) {
+		b_info = qp->b_info;
+		if (unlikely(b_info->op_idx > 0))
+			goto success;
+	} else if (rte_ring_dequeue(qp->processed_pkts,
+				    (void **)&b_info))
+		return 0;
+	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+				       CMD_Q_HEAD_LO_BASE);
+
+	if (b_info->head_offset < b_info->tail_offset) {
+		if ((cur_head_offset >= b_info->head_offset) &&
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	} else {
+		if ((cur_head_offset >= b_info->head_offset) ||
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	}
+
+success:
+	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
+	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+	b_info->desccnt = 0;
+	if (b_info->opcnt > 0) {
+		qp->b_info = b_info;
+	} else {
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		qp->b_info = NULL;
+	}
+
+	return nb_ops;
+}
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 346d5ee..4455497 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -264,4 +264,39 @@ struct ccp_qp;
 int ccp_set_session_parameters(struct ccp_session *sess,
 			       const struct rte_crypto_sym_xform *xform);
 
+/**
+ * Find the number of command-queue slots an op of this session needs
+ *
+ * @param session CCP private session
+ * @return number of CCP descriptors required per crypto op
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * Process crypto ops for enqueue on a CCP command queue
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op table of crypto ops
+ * @param cmd_q CCP cmd queue allotted for this burst
+ * @param nb_ops number of ops to be submitted
+ * @param slots_req number of descriptor slots the burst requires
+ * @return number of ops successfully enqueued
+ */
+int process_ops_to_enqueue(const struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   struct ccp_queue *cmd_q,
+			   uint16_t nb_ops,
+			   int slots_req);
+
+/**
+ * Process completed crypto ops on dequeue
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op table to be filled with completed crypto ops
+ * @param nb_ops requested number of ops
+ * @return number of ops actually dequeued
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   uint16_t nb_ops);
+
 #endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 57bccf4..fee90e3 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -61,6 +61,33 @@ ccp_dev_start(struct rte_cryptodev *dev)
 	return 0;
 }
 
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+	int i, ret = 0;
+	struct ccp_device *dev;
+	struct ccp_private *priv = cdev->data->dev_private;
+
+	dev = TAILQ_NEXT(priv->last_dev, next);
+	if (unlikely(dev == NULL))
+		dev = TAILQ_FIRST(&ccp_list);
+	priv->last_dev = dev;
+	if (dev->qidx >= dev->cmd_q_count)
+		dev->qidx = 0;
+	ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+	if (ret >= slot_req)
+		return &dev->cmd_q[dev->qidx];
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->qidx++;
+		if (dev->qidx >= dev->cmd_q_count)
+			dev->qidx = 0;
+		ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+		if (ret >= slot_req)
+			return &dev->cmd_q[dev->qidx];
+	}
+	return NULL;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index a16ba81..cfb3b03 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -445,4 +445,13 @@ int ccp_dev_start(struct rte_cryptodev *dev);
  */
 int ccp_probe_devices(const struct rte_pci_id *ccp_id);
 
+/**
+ * Allot a CCP command queue with enough free slots
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of slots required
+ * @return allotted CCP queue on success, otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
 #endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index cc35a97..ed6ca5d 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -38,6 +38,7 @@
 #include <rte_dev.h>
 #include <rte_malloc.h>
 
+#include "ccp_crypto.h"
 #include "ccp_dev.h"
 #include "ccp_pmd_private.h"
 
@@ -47,23 +48,72 @@
 static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
+{
+	struct ccp_session *sess = NULL;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (unlikely(op->sym->session == NULL))
+			return NULL;
+
+		sess = (struct ccp_session *)
+			get_session_private_data(
+				op->sym->session,
+				ccp_cryptodev_driver_id);
+	}
+
+	return sess;
+}
+
 static uint16_t
-ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
-		      struct rte_crypto_op **ops __rte_unused,
-		      uint16_t nb_ops __rte_unused)
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		      uint16_t nb_ops)
 {
-	uint16_t enq_cnt = 0;
+	struct ccp_session *sess = NULL;
+	struct ccp_qp *qp = queue_pair;
+	struct ccp_queue *cmd_q;
+	struct rte_cryptodev *dev = qp->dev;
+	uint16_t i, enq_cnt = 0, slots_req = 0;
+
+	if (nb_ops == 0)
+		return 0;
+
+	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+		return 0;
+
+	for (i = 0; i < nb_ops; i++) {
+		sess = get_ccp_session(qp, ops[i]);
+		if (unlikely(sess == NULL) && (i == 0)) {
+			qp->qp_stats.enqueue_err_count++;
+			return 0;
+		} else if (sess == NULL) {
+			nb_ops = i;
+			break;
+		}
+		slots_req += ccp_compute_slot_count(sess);
+	}
+
+	cmd_q = ccp_allot_queue(dev, slots_req);
+	if (unlikely(cmd_q == NULL))
+		return 0;
 
+	enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+	qp->qp_stats.enqueued_count += enq_cnt;
 	return enq_cnt;
 }
 
 static uint16_t
-ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
-		      struct rte_crypto_op **ops __rte_unused,
-		      uint16_t nb_ops __rte_unused)
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
+	struct ccp_qp *qp = queue_pair;
 	uint16_t nb_dequeued = 0;
 
+	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
 	return nb_dequeued;
 }
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
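
The completion test in process_ops_to_dequeue() above is the subtle part of
this patch: a batch is only handed back once the hardware head pointer has
left the half-open descriptor window [head_offset, tail_offset) that the
batch occupies, and that window may wrap around the circular command queue.
A minimal standalone sketch of the same predicate (names are illustrative,
not from the patch):

#include <stdbool.h>
#include <stdint.h>

/* True while hardware is still consuming descriptors in the half-open
 * window [head, tail) of a circular queue; when the window wraps past
 * the end of the queue the test flips from AND to OR. */
static bool
batch_in_flight(uint32_t cur_head, uint32_t head, uint32_t tail)
{
	if (head < tail)
		return cur_head >= head && cur_head < tail;
	return cur_head >= head || cur_head < tail;
}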

* [PATCH v3 07/19] crypto/ccp: support for RTE_CRYPTO_OP_SESSIONLESS
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (4 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 06/19] crypto/ccp: support crypto enqueue and dequeue burst api Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 08/19] crypto/ccp: support stats related crypto pmd ops Ravi Kumar
                       ` (13 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/rte_ccp_pmd.c | 33 +++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index ed6ca5d..23d3af3 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -49,7 +49,7 @@ static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
 static struct ccp_session *
-get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
 {
 	struct ccp_session *sess = NULL;
 
@@ -61,6 +61,27 @@ get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
 			get_session_private_data(
 				op->sym->session,
 				ccp_cryptodev_driver_id);
+	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		void *_sess;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, &_sess))
+			return NULL;
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+		sess = (struct ccp_session *)_sess_private_data;
+
+		if (unlikely(ccp_set_session_parameters(sess,
+							op->sym->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
+			return NULL;
+		}
+		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_session_private_data(op->sym->session,
+					 ccp_cryptodev_driver_id,
+					 _sess_private_data);
 	}
 
 	return sess;
@@ -108,10 +129,18 @@ ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	struct ccp_qp *qp = queue_pair;
-	uint16_t nb_dequeued = 0;
+	uint16_t nb_dequeued = 0, i;
 
 	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
 
+	/* Free session if a session-less crypto op */
+	for (i = 0; i < nb_dequeued; i++)
+		if (unlikely(ops[i]->sess_type ==
+			     RTE_CRYPTO_OP_SESSIONLESS)) {
+			rte_mempool_put(qp->sess_mp,
+					ops[i]->sym->session);
+			ops[i]->sym->session = NULL;
+		}
 	qp->qp_stats.dequeued_count += nb_dequeued;
 
 	return nb_dequeued;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
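
With this patch an application can skip explicit session creation: the PMD
builds a short-lived session out of qp->sess_mp in get_ccp_session() and the
dequeue loop returns it to the pool once the op completes. A hedged
caller-side sketch (op_pool is an assumed, pre-created crypto-op mempool;
build_sessionless_op is illustrative, not part of the patch):

#include <rte_crypto.h>
#include <rte_mempool.h>

static struct rte_crypto_op *
build_sessionless_op(struct rte_mempool *op_pool)
{
	struct rte_crypto_op *op;
	struct rte_crypto_sym_xform *xform;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return NULL;
	/* chain one transform into the op's private area; the PMD reads
	 * it via op->sym->xform when sess_type is SESSIONLESS */
	xform = rte_crypto_op_sym_xforms_alloc(op, 1);
	if (xform == NULL)
		return NULL;
	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->next = NULL;
	/* cipher algo, key and IV offset are filled in as usual */
	return op;
}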

* [PATCH v3 08/19] crypto/ccp: support stats related crypto pmd ops
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (5 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 07/19] crypto/ccp: support for RTE_CRYPTO_OP_SESSIONLESS Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 09/19] crypto/ccp: support ccp hwrng feature Ravi Kumar
                       ` (12 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_pmd_ops.c | 34 ++++++++++++++++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index a02aa6f..d483a74 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -68,6 +68,36 @@ ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
 }
 
 static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+		  struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+static void
 ccp_pmd_info_get(struct rte_cryptodev *dev,
 		 struct rte_cryptodev_info *dev_info)
 {
@@ -281,8 +311,8 @@ struct rte_cryptodev_ops ccp_ops = {
 		.dev_stop		= ccp_pmd_stop,
 		.dev_close		= ccp_pmd_close,
 
-		.stats_get		= NULL,
-		.stats_reset		= NULL,
+		.stats_get		= ccp_pmd_stats_get,
+		.stats_reset		= ccp_pmd_stats_reset,
 
 		.dev_infos_get		= ccp_pmd_info_get,
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
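
These two ops back the public rte_cryptodev_stats_get()/_reset() calls; the
get handler simply sums the per-queue-pair counters maintained by the
enqueue and dequeue bursts. An illustrative caller, assuming dev_id
identifies the ccp device:

#include <inttypes.h>
#include <stdio.h>

#include <rte_cryptodev.h>

static void
dump_and_clear_stats(uint8_t dev_id)
{
	struct rte_cryptodev_stats stats;

	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
		printf("enq %" PRIu64 " deq %" PRIu64
		       " enq_err %" PRIu64 " deq_err %" PRIu64 "\n",
		       stats.enqueued_count, stats.dequeued_count,
		       stats.enqueue_err_count, stats.dequeue_err_count);
	rte_cryptodev_stats_reset(dev_id);
}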

* [PATCH v3 09/19] crypto/ccp: support ccp hwrng feature
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (6 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 08/19] crypto/ccp: support stats related crypto pmd ops Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 10/19] crypto/ccp: support aes cipher algo Ravi Kumar
                       ` (11 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_dev.c | 20 ++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h | 11 +++++++++++
 2 files changed, 31 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index fee90e3..d8c0ab4 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -88,6 +88,26 @@ ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
 	return NULL;
 }
 
+int
+ccp_read_hwrng(uint32_t *value)
+{
+	struct ccp_device *dev;
+
+	TAILQ_FOREACH(dev, &ccp_list, next) {
+		void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+		while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+			*value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+			if (*value) {
+				dev->hwrng_retries = 0;
+				return 0;
+			}
+		}
+		dev->hwrng_retries = 0;
+	}
+	return -1;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index cfb3b03..a5c9ef3 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -47,6 +47,7 @@
 
 /**< CCP specific */
 #define MAX_HW_QUEUES                   5
+#define CCP_MAX_TRNG_RETRIES		10
 
 /**< CCP Register Mappings */
 #define Q_MASK_REG                      0x000
@@ -223,6 +224,8 @@ struct ccp_device {
 	/**< protection for shared lsb region allocation */
 	int qidx;
 	/**< current queue index */
+	int hwrng_retries;
+	/**< retry counter for CCP TRNG */
 } __rte_cache_aligned;
 
 /**< CCP H/W engine related */
@@ -454,4 +457,12 @@ int ccp_probe_devices(const struct rte_pci_id *ccp_id);
  */
 struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
 
+/**
+ * read hwrng value
+ *
+ * @param trng_value pointer where the 32-bit RNG word is written
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
 #endif /* _CCP_DEV_H_ */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
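
ccp_read_hwrng() walks every probed CCP, reading TRNG_OUT_REG up to
CCP_MAX_TRNG_RETRIES times per device; the register reads 0 until the
entropy pool is replenished, so only a non-zero word counts as success.
A hedged consumer sketch (fill_seed is illustrative, not part of the
patch):

#include <stdint.h>

#include "ccp_dev.h"

/* Fill a buffer with 32-bit words of hardware entropy; returns -1 as
 * soon as every CCP has exhausted its retries without producing one. */
static int
fill_seed(uint32_t *buf, unsigned int nwords)
{
	unsigned int i;

	for (i = 0; i < nwords; i++)
		if (ccp_read_hwrng(&buf[i]) != 0)
			return -1;
	return 0;
}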

* [PATCH v3 10/19] crypto/ccp: support aes cipher algo
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (7 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 09/19] crypto/ccp: support ccp hwrng feature Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 11/19] crypto/ccp: support 3des " Ravi Kumar
                       ` (10 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 197 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h  |  13 +++
 drivers/crypto/ccp/ccp_dev.h     |  53 +++++++++++
 drivers/crypto/ccp/ccp_pmd_ops.c |  60 ++++++++++++
 4 files changed, 321 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index c17e84f..b097355 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -80,6 +80,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			     const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	size_t i;
 
 	cipher_xform = &xform->cipher;
 
@@ -99,6 +100,21 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 	sess->iv.length = cipher_xform->iv.length;
 
 	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo");
 		return -1;
@@ -106,10 +122,27 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 
 
 	switch (sess->cipher.engine) {
+	case CCP_ENGINE_AES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (i = 0; i < sess->cipher.key_length ; i++)
+			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+				sess->cipher.key[i];
+		break;
 	default:
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
 	}
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
 	return 0;
 }
 
@@ -235,6 +268,18 @@ ccp_cipher_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		count = 1;
+		/**< only op */
+		break;
+	case CCP_CIPHER_ALGO_AES_CTR:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
@@ -297,10 +342,146 @@ ccp_compute_slot_count(struct ccp_session *session)
 	return count;
 }
 
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_desc *desc;
+	union ccp_function function;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 0;
+	CCP_CMD_EOM(desc) = 0;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+	CCP_PT_BITWISE(&function) = pst->bit_mod;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = pst->len;
+
+	if (pst->dir) {
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = 0;
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+	} else {
+
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = 0;
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+	}
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
+
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *lsb_buf;
+	struct ccp_passthru pst = {0};
+	struct ccp_desc *desc;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	uint8_t *iv;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+	function.raw = 0;
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
+		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
+				   iv, session->iv.length);
+			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+			CCP_AES_SIZE(&function) = 0x1F;
+		} else {
+			lsb_buf =
+			&(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+			rte_memcpy(lsb_buf +
+				   (CCP_SB_BYTES - session->iv.length),
+				   iv, session->iv.length);
+			pst.src_addr = b_info->lsb_buf_phys +
+				(b_info->lsb_buf_idx * CCP_SB_BYTES);
+			b_info->lsb_buf_idx++;
+		}
+
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (likely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+	key_addr = session->cipher.key_phys;
+
+	/* prepare desc for aes command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
-		  struct ccp_queue *cmd_q __rte_unused,
-		  struct ccp_batch_info *b_info __rte_unused)
+		  struct ccp_queue *cmd_q,
+		  struct ccp_batch_info *b_info)
 {
 	int result = 0;
 	struct ccp_session *session;
@@ -310,6 +491,18 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 					 ccp_cryptodev_driver_id);
 
 	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	case CCP_CIPHER_ALGO_AES_CTR:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 1;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 4455497..614cd47 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -46,7 +46,20 @@
 
 #include "ccp_dev.h"
 
+#define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
 #define CCP_SHA3_CTX_SIZE 200
+
+/**Macro helpers for CCP command creation*/
+#define	CCP_AES_SIZE(p)		((p)->aes.size)
+#define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
+#define	CCP_AES_MODE(p)		((p)->aes.mode)
+#define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
+#define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index a5c9ef3..759afc1 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -48,6 +48,7 @@
 /**< CCP specific */
 #define MAX_HW_QUEUES                   5
 #define CCP_MAX_TRNG_RETRIES		10
+#define CCP_ALIGN(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
 
 /**< CCP Register Mappings */
 #define Q_MASK_REG                      0x000
@@ -104,10 +105,52 @@
 #define LSB_SIZE                        16
 #define LSB_ITEM_SIZE                   32
 #define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR)      ((LSB_ADDR) / LSB_ITEM_SIZE)
 
 /* General CCP Defines */
 
 #define CCP_SB_BYTES                    32
+/* Word 0 */
+#define CCP_CMD_DW0(p)		((p)->dw0)
+#define CCP_CMD_SOC(p)		(CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p)		(CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p)	        (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p)		(CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p)	(CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p)	(CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p)	        (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p)		((p)->length)
+#define CCP_CMD_LEN(p)		(CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p)		((p)->src_lo)
+#define CCP_CMD_SRC_LO(p)	(CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p)		((p)->dw3)
+#define CCP_CMD_SRC_MEM(p)	((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p)	((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p)	((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p)	((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p)		((p)->dw4)
+#define CCP_CMD_DST_LO(p)	(CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p)		((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p)	(CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p)	((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p)	((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p)	((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p)	((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p)		((p)->key_lo)
+#define CCP_CMD_KEY_LO(p)	(CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p)		((p)->dw7)
+#define CCP_CMD_KEY_HI(p)	((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p)	((p)->dw7.key_mem)
 
 /* bitmap */
 enum {
@@ -412,6 +455,16 @@ struct ccp_desc {
 };
 
 /**
+ * ccp memory type
+ */
+enum ccp_memtype {
+	CCP_MEMTYPE_SYSTEM = 0,
+	CCP_MEMTYPE_SB,
+	CCP_MEMTYPE_LOCAL,
+	CCP_MEMTYPE_LAST,
+};
+
+/**
  * cmd id to follow order
  */
 enum ccp_cmd_order {
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index d483a74..5f56242 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,66 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{       /* AES ECB */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_ECB,
+				.block_size = 16,
+				.key_size = {
+				   .min = 16,
+				   .max = 32,
+				   .increment = 8
+				},
+				.iv_size = {
+				   .min = 0,
+				   .max = 0,
+				   .increment = 0
+				}
+			}, }
+		}, }
+	},
+	{       /* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
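
The non-obvious detail above is the key layout: the CCP engine consumes
AES keys byte-reversed relative to the order the cryptodev API supplies
them, which is what the key_ccp[key_length - i - 1] = key[i] loop in
ccp_configure_session_cipher() implements. A standalone restatement
(helper name is illustrative, not from the patch):

#include <stddef.h>
#include <stdint.h>

/* Reverse an AES key into the byte order the CCP engine expects;
 * mirrors the per-byte loop in ccp_configure_session_cipher(). */
static void
ccp_reverse_key(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[len - i - 1] = src[i];
}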

* [PATCH v3 11/19] crypto/ccp: support 3des cipher algo
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (8 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 10/19] crypto/ccp: support aes cipher algo Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 12/19] crypto/ccp: support aes-cmac auth algo Ravi Kumar
                       ` (9 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 132 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h  |   3 +
 drivers/crypto/ccp/ccp_pmd_ops.c |  20 ++++++
 3 files changed, 154 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index b097355..0660761 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -80,7 +80,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			     const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
-	size_t i;
+	size_t i, j, x;
 
 	cipher_xform = &xform->cipher;
 
@@ -115,6 +115,11 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
 		sess->cipher.engine = CCP_ENGINE_AES;
 		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+		sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_3DES;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo");
 		return -1;
@@ -137,6 +142,20 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
 				sess->cipher.key[i];
 		break;
+	case CCP_ENGINE_3DES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+			for (i = 0; i < 8; i++)
+				sess->cipher.key_ccp[(8 + x) - i - 1] =
+					sess->cipher.key[i + x];
+		break;
 	default:
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
@@ -280,6 +299,10 @@ ccp_cipher_slot(struct ccp_session *session)
 		count = 2;
 		/**< op + passthrough for iv */
 		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
@@ -478,6 +501,109 @@ ccp_perform_aes(struct rte_crypto_op *op,
 	return 0;
 }
 
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	unsigned char *lsb_buf;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *iv;
+	phys_addr_t src_addr, dest_addr, key_addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	switch (session->cipher.um.des_mode) {
+	case CCP_DES_MODE_CBC:
+		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+		b_info->lsb_buf_idx++;
+
+		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+			   iv, session->iv.length);
+
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+		break;
+	case CCP_DES_MODE_CFB:
+	case CCP_DES_MODE_ECB:
+		CCP_LOG_ERR("Unsupported DES cipher mode");
+		return -ENOTSUP;
+	}
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr =
+			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						   op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+
+	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for des command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.des_mode)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	/* Write the new tail address back to the queue register */
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
 		  struct ccp_queue *cmd_q,
@@ -503,6 +629,10 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 		result = ccp_perform_aes(op, cmd_q, b_info);
 		b_info->desccnt += 1;
 		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		result = ccp_perform_3des(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 614cd47..d528ec9 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -57,6 +57,9 @@
 #define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
 #define	CCP_AES_MODE(p)		((p)->aes.mode)
 #define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_DES_ENCRYPT(p)	((p)->des.encrypt)
+#define	CCP_DES_MODE(p)		((p)->des.mode)
+#define	CCP_DES_TYPE(p)		((p)->des.type)
 #define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
 #define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
 
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 5f56242..3a16be8 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -99,6 +99,26 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 24,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
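
Note the 3DES key layout differs from the AES case: instead of reversing
the whole key, the nested j/x/i loop above byte-reverses each 8-byte DES
subkey in place while keeping the subkeys in order. An equivalent
standalone form (helper name is illustrative, not from the patch):

#include <stddef.h>
#include <stdint.h>

/* Byte-swap each 8-byte DES subkey independently, preserving subkey
 * order; equivalent to the nested loop in the patch above. */
static void
ccp_reverse_des_key(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t blk, i;

	for (blk = 0; blk < len / 8; blk++)
		for (i = 0; i < 8; i++)
			dst[blk * 8 + 7 - i] = src[blk * 8 + i];
}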

* [PATCH v3 12/19] crypto/ccp: support aes-cmac auth algo
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (9 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 11/19] crypto/ccp: support 3des " Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 13/19] crypto/ccp: support aes-gcm aead algo Ravi Kumar
                       ` (8 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 277 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_pmd_ops.c |  20 +++
 2 files changed, 295 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 0660761..6e593d8 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -36,6 +36,8 @@
 #include <sys/queue.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <openssl/cmac.h> /* subkey APIs */
+#include <openssl/evp.h> /* subkey APIs */
 
 #include <rte_hexdump.h>
 #include <rte_memzone.h>
@@ -74,6 +76,84 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 	return res;
 }
 
+/* prepare temporary keys K1 and K2 */
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+	int i;
+	/* Shift block to left, including carry */
+	for (i = 0; i < bl; i++) {
+		k[i] = l[i] << 1;
+		if (i < bl - 1 && l[i + 1] & 0x80)
+			k[i] |= 1;
+	}
+	/* If MSB set fixup with R */
+	if (l[0] & 0x80)
+		k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/* subkeys K1 and K2 generation for CMAC */
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+	const EVP_CIPHER *algo;
+	EVP_CIPHER_CTX *ctx;
+	unsigned char *ccp_ctx;
+	size_t i;
+	int dstlen, totlen;
+	unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+	unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+	unsigned char k1[AES_BLOCK_SIZE] = {0};
+	unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+	if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+		algo =  EVP_aes_128_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+		algo =  EVP_aes_192_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+		algo =  EVP_aes_256_cbc();
+	else {
+		CCP_LOG_ERR("Invalid CMAC type length");
+		return -1;
+	}
+
+	ctx = EVP_CIPHER_CTX_new();
+	if (!ctx) {
+		CCP_LOG_ERR("ctx creation failed");
+		return -1;
+	}
+	if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+			    (unsigned char *)zero_iv) <= 0)
+		goto key_generate_err;
+	if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+			      AES_BLOCK_SIZE) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+		goto key_generate_err;
+
+	memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+	prepare_key(k1, dst, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k1[i];
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+				   (2 * CCP_SB_BYTES) - 1);
+	prepare_key(k2, k1, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k2[i];
+
+	EVP_CIPHER_CTX_free(ctx);
+
+	return 0;
+
+key_generate_err:
+	CCP_LOG_ERR("CMAC Init failed");
+	EVP_CIPHER_CTX_free(ctx);
+	return -1;
+}
+
 /* configure session */
 static int
 ccp_configure_session_cipher(struct ccp_session *sess,
@@ -170,6 +250,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_auth_xform *auth_xform = NULL;
+	size_t i;
 
 	auth_xform = &xform->auth;
 
@@ -179,6 +260,33 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+		sess->auth.key_length = auth_xform->key.length;
+		/**<padding and hash result*/
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = AES_BLOCK_SIZE;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		if (sess->auth.key_length == 16)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->auth.key_length == 24)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->auth.key_length == 32)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid CMAC key length");
+			return -1;
+		}
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   sess->auth.key_length);
+		for (i = 0; i < sess->auth.key_length; i++)
+			sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+				sess->auth.key[i];
+		if (generate_cmac_subkeys(sess))
+			return -1;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported hash algo");
 		return -ENOTSUP;
@@ -316,6 +424,15 @@ ccp_auth_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_CMAC:
+		count = 4;
+		/**
+		 * op
+		 * extra descriptor in padding case
+		 * (k1/k2(255:128) with iv(127:0))
+		 * Retrieve result
+		 */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported auth algo %d",
 			    session->auth.algo);
@@ -415,6 +532,158 @@ ccp_perform_passthru(struct ccp_passthru *pst,
 }
 
 static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *src_tb, *append_ptr, *ctx_addr;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	int length, non_align_len;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+	CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
+	if (op->sym->auth.data.length % session->auth.block_size == 0) {
+
+		ctx_addr = session->auth.pre_compute;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		/* prepare desc for aes-cmac command */
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	} else {
+		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+		length *= AES_BLOCK_SIZE;
+		non_align_len = op->sym->auth.data.length - length;
+		/* prepare desc for aes-cmac command */
+		/*Command 1*/
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_INIT(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		/*Command 2*/
+		append_ptr = append_ptr + CCP_SB_BYTES;
+		memset(append_ptr, 0, AES_BLOCK_SIZE);
+		src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+						 uint8_t *,
+						 op->sym->auth.data.offset +
+						 length);
+		rte_memcpy(append_ptr, src_tb, non_align_len);
+		append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+		CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+		CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	}
+	/* Retrieve result */
+	pst.dest_addr = dest_addr;
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
 ccp_perform_aes(struct rte_crypto_op *op,
 		struct ccp_queue *cmd_q,
 		struct ccp_batch_info *b_info)
@@ -643,8 +912,8 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 
 static inline int
 ccp_crypto_auth(struct rte_crypto_op *op,
-		struct ccp_queue *cmd_q __rte_unused,
-		struct ccp_batch_info *b_info __rte_unused)
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
 {
 
 	int result = 0;
@@ -655,6 +924,10 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 					ccp_cryptodev_driver_id);
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_CMAC:
+		result = ccp_perform_aes_cmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported auth algo %d",
 			    session->auth.algo);
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 3a16be8..1fb6a6d 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,26 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{	/*AES-CMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+			}, }
+		}, }
+	},
 	{       /* AES ECB */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
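
prepare_key() above is the RFC 4493 subkey derivation: with
L = AES-ENC(K, 0^128), the subkeys are K1 = double(L) and K2 = double(K1),
where double() is a one-bit left shift in GF(2^128) with a conditional XOR
of the reduction constant 0x87 into the last byte. A compact sketch of the
same primitive (function name is illustrative, not from the patch):

#include <stdint.h>

/* GF(2^128) doubling for CMAC subkeys (RFC 4493): shift the 16-byte
 * block left one bit; if the shifted-out MSB was set, fold in 0x87. */
static void
gf128_double(uint8_t out[16], const uint8_t in[16])
{
	uint8_t carry = (in[0] & 0x80) ? 0x87 : 0x00;
	int i;

	for (i = 0; i < 15; i++)
		out[i] = (uint8_t)((in[i] << 1) | (in[i + 1] >> 7));
	out[15] = (uint8_t)((in[15] << 1) ^ carry);
}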

* [PATCH v3 13/19] crypto/ccp: support aes-gcm aead algo
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (10 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 12/19] crypto/ccp: support aes-cmac auth algo Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 14/19] crypto/ccp: support sha1 authentication algo Ravi Kumar
                       ` (7 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 240 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_pmd_ops.c |  30 +++++
 2 files changed, 266 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 6e593d8..4ced193 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -299,6 +299,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
 			   const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_aead_xform *aead_xform = NULL;
+	size_t i;
 
 	aead_xform = &xform->aead;
 
@@ -313,6 +314,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
 		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	}
+	sess->aead_algo = aead_xform->algo;
 	sess->auth.aad_length = aead_xform->aad_length;
 	sess->auth.digest_length = aead_xform->digest_length;
 
@@ -321,10 +323,37 @@ ccp_configure_session_aead(struct ccp_session *sess,
 	sess->iv.length = aead_xform->iv.length;
 
 	switch (aead_xform->algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid aead key length");
+			return -1;
+		}
+		for (i = 0; i < sess->cipher.key_length; i++)
+			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+				sess->cipher.key[i];
+		sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = 0;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		sess->cmd_id = CCP_CMD_COMBINED;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo");
 		return -ENOTSUP;
 	}
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
 	return 0;
 }
 
@@ -447,10 +476,27 @@ ccp_aead_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->aead_algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo %d",
 			    session->aead_algo);
 	}
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		count = 5;
+		/**
+		 * 1. Passthru iv
+		 * 2. Hash AAD
+		 * 3. GCTR
+		 * 4. Reload passthru
+		 * 5. Hash Final tag
+		 */
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+			    session->auth.algo);
+	}
 	return count;
 }
 
@@ -873,6 +919,184 @@ ccp_perform_3des(struct rte_crypto_op *op,
 	return 0;
 }
 
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *lsb_buf, *append_ptr, *iv;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint64_t *temp;
+	phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+	phys_addr_t digest_dest_addr;
+	int length, non_align_len, i;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						   session->auth.ctx_len);
+	digest_dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	append_ptr += AES_BLOCK_SIZE;
+	temp = (uint64_t *)append_ptr;
+	*temp++ =  rte_bswap64(session->auth.aad_length << 3);
+	*temp =  rte_bswap64(op->sym->cipher.data.length << 3);
+
+	non_align_len = op->sym->cipher.data.length % AES_BLOCK_SIZE;
+	length = CCP_ALIGN(op->sym->cipher.data.length, AES_BLOCK_SIZE);
+
+	aad_addr = rte_mem_virt2phy((void *)op->sym->aead.aad.data);
+
+	/* CMD1 IV Passthru */
+	for (i = 0;  i < CTR_IV_SIZE; i++)
+		session->cipher.nonce[CTR_NONCE_SIZE + CTR_IV_SIZE - 1 - i] =
+			iv[i];
+	lsb_buf = session->cipher.nonce;
+
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD2 GHASH-AAD */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD3 : GCTR Plain text */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	if (non_align_len == 0)
+		CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+	else
+		CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD4 : PT to copy IV */
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = AES_BLOCK_SIZE;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD5 : GHASH-Final */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	/* Last block (AAD_len || PT_len)*/
+	CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
+	CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
 		  struct ccp_queue *cmd_q,
@@ -939,17 +1163,25 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 
 static inline int
 ccp_crypto_aead(struct rte_crypto_op *op,
-		struct ccp_queue *cmd_q __rte_unused,
-		struct ccp_batch_info *b_info __rte_unused)
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
 {
 	int result = 0;
 	struct ccp_session *session;
 
 	session = (struct ccp_session *)get_session_private_data(
-					 op->sym->session,
+					op->sym->session,
 					ccp_cryptodev_driver_id);
 
-	switch (session->aead_algo) {
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+			CCP_LOG_ERR("Incorrect chain order");
+			return -1;
+		}
+		result = ccp_perform_aes_gcm(op, cmd_q);
+		b_info->desccnt += 5;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo %d",
 			    session->aead_algo);
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 1fb6a6d..24f577a 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -139,6 +139,36 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{       /* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				 .algo = RTE_CRYPTO_AEAD_AES_GCM,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = {
+					 .min = 0,
+					 .max = 65535,
+					 .increment = 1
+				 },
+				 .iv_size = {
+					 .min = 12,
+					 .max = 16,
+					 .increment = 4
+				 },
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
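
For readers tracing the GHASH-Final step above: the driver appends a scratch area to the source mbuf, keeps the 16-byte digest at its start, and writes the standard GCM length block (bit lengths of AAD and ciphertext, big-endian) immediately after it; that block is what CMD5 hashes. A minimal standalone sketch of the length-block encoding, with __builtin_bswap64() standing in for rte_bswap64() so it compiles outside DPDK (the helper name below is illustrative, not part of the PMD):

#include <stdint.h>
#include <string.h>

/* Build the 16-byte GCM length block: len(AAD) || len(C), both in
 * bits, big-endian; the swaps assume a little-endian host. */
static void build_gcm_len_block(uint8_t out[16], uint64_t aad_len,
				uint64_t text_len)
{
	uint64_t tmp[2];

	tmp[0] = __builtin_bswap64(aad_len << 3);  /* AAD length in bits */
	tmp[1] = __builtin_bswap64(text_len << 3); /* text length in bits */
	memcpy(out, tmp, sizeof(tmp));
}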

* [PATCH v3 14/19] crypto/ccp: support sha1 authentication algo
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (11 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 13/19] crypto/ccp: support aes-gcm aead algo Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 15/19] crypto/ccp: support sha2 family " Ravi Kumar
                       ` (6 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 367 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  23 +++
 drivers/crypto/ccp/ccp_pmd_ops.c |  42 +++++
 3 files changed, 432 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 4ced193..ace6bc2 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -36,6 +36,7 @@
 #include <sys/queue.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <openssl/sha.h>
 #include <openssl/cmac.h> /*sub key apis*/
 #include <openssl/evp.h> /*sub key apis*/
 
@@ -52,6 +53,14 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA1_H4, SHA1_H3,
+	SHA1_H2, SHA1_H1,
+	SHA1_H0, 0x0U,
+	0x0U, 0x0U,
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -76,6 +85,59 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 	return res;
 }
 
+/* partial hash using openssl */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA_CTX ctx;
+
+	if (!SHA1_Init(&ctx))
+		return -EFAULT;
+	SHA1_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+	return 0;
+}
+
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+	uint8_t ipad[sess->auth.block_size];
+	uint8_t	opad[sess->auth.block_size];
+	uint8_t *ipad_t, *opad_t;
+	uint32_t *hash_value_be32, hash_temp32[8];
+	int i, count;
+
+	opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+	hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+
+	/* the key buffer is zero-padded to the block size of the algorithm */
+	for (i = 0; i < sess->auth.block_size; i++) {
+		ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+		opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+	}
+
+	switch (sess->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		count = SHA1_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	default:
+		CCP_LOG_ERR("Invalid auth algo");
+		return -1;
+	}
+}
+
 /* prepare temporary keys K1 and K2 */
 static void prepare_key(unsigned char *k, unsigned char *l, int bl)
 {
@@ -260,6 +322,31 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1:
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx = (void *)ccp_sha1_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -453,6 +540,13 @@ ccp_auth_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+		count = 3;
+		/**< op + lsb passthrough copy to/from */
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		count = 6;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -578,6 +672,271 @@ ccp_perform_passthru(struct ccp_passthru *pst,
 }
 
 static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, dest_addr_t;
+	struct ccp_passthru pst;
+	uint64_t auth_msg_bits;
+	void *append_ptr;
+	uint8_t *addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+	addr = session->auth.pre_compute;
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	dest_addr_t = dest_addr;
+
+	/** Load PHash1 to LSB*/
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/** SHA engine command descriptor for intermediate hash */
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+	auth_msg_bits = (op->sym->auth.data.length +
+			 session->auth.block_size)  * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Intermediate Hash value retrieve */
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = session->auth.ctx_len;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	}
+
+	/** Load PHash2 to LSB*/
+	addr += session->auth.ctx_len;
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/** SHA engine command descriptor for final hash */
+	dest_addr_t += session->auth.offset;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+			     session->auth.offset);
+	auth_msg_bits = (session->auth.block_size +
+			 session->auth.ctx_len -
+			 session->auth.offset) * 8;
+
+	CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Retrieve hmac output */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+
+}
+
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr;
+	struct ccp_passthru pst;
+	void *append_ptr;
+	uint64_t auth_msg_bits;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+
+	/** Passthru SHA context */
+
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+						     session->auth.ctx);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/** Prepare SHA command descriptor */
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+	auth_msg_bits = op->sym->auth.data.length * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Hash value retrieve */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+
+}
+
+static int
 ccp_perform_aes_cmac(struct rte_crypto_op *op,
 		     struct ccp_queue *cmd_q)
 {
@@ -1148,6 +1507,14 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 					ccp_cryptodev_driver_id);
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+		result = ccp_perform_sha(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 6;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index d528ec9..42179de 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -60,9 +60,32 @@
 #define	CCP_DES_ENCRYPT(p)	((p)->des.encrypt)
 #define	CCP_DES_MODE(p)		((p)->des.mode)
 #define	CCP_DES_TYPE(p)		((p)->des.type)
+#define	CCP_SHA_TYPE(p)		((p)->sha.type)
 #define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
 #define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
 
+/* HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#define MD5_DIGEST_SIZE         16
+#define MD5_BLOCK_SIZE          64
+#endif
+
+/* SHA */
+#define SHA_COMMON_DIGEST_SIZE	32
+#define SHA1_DIGEST_SIZE        20
+#define SHA1_BLOCK_SIZE         64
+
+/* SHA LSB initialization values */
+
+#define SHA1_H0		0x67452301UL
+#define SHA1_H1		0xefcdab89UL
+#define SHA1_H2		0x98badcfeUL
+#define SHA1_H3		0x10325476UL
+#define SHA1_H4		0xc3d2e1f0UL
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 24f577a..6adef1c 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,48 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{	/* SHA1 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/*AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
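
The partial-hash trick above is worth spelling out: generate_partial_hash() runs a single SHA1 compression over k ^ ipad and k ^ opad and stores the raw h0..h4 state, with no padding and no finalization, so the engine can later resume from those states for the inner and outer hashes. Below is a standalone sketch of that precompute under the same assumptions the patch makes (key already zero-padded to the 64-byte block, SHA_CTX laid out with the h0..h4 words first); the word reversal into CCP LSB order is left out:

#include <string.h>
#include <openssl/sha.h>

#define IPAD 0x36
#define OPAD 0x5c

/* Compute H(key ^ ipad) and H(key ^ opad) as raw compression-function
 * states: one SHA1_Transform() over a single 64-byte block each. */
static int sha1_hmac_precompute(const uint8_t key[64],
				uint8_t ihash[SHA_DIGEST_LENGTH],
				uint8_t ohash[SHA_DIGEST_LENGTH])
{
	uint8_t ipad[64], opad[64];
	SHA_CTX ctx;
	int i;

	for (i = 0; i < 64; i++) {
		ipad[i] = key[i] ^ IPAD;
		opad[i] = key[i] ^ OPAD;
	}
	if (!SHA1_Init(&ctx))
		return -1;
	SHA1_Transform(&ctx, ipad);
	memcpy(ihash, &ctx, SHA_DIGEST_LENGTH); /* raw h0..h4 words */

	if (!SHA1_Init(&ctx))
		return -1;
	SHA1_Transform(&ctx, opad);
	memcpy(ohash, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}

The HMAC the hardware then produces is H(opad-state, IHash) where IHash = H(ipad-state, message), matching the six-descriptor flow counted in ccp_auth_slot().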

* [PATCH v3 15/19] crypto/ccp: support sha2 family authentication algo
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (12 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 14/19] crypto/ccp: support sha1 authentication algo Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 16/19] crypto/ccp: support sha3 " Ravi Kumar
                       ` (5 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 270 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  48 +++++++
 drivers/crypto/ccp/ccp_pmd_ops.c | 168 ++++++++++++++++++++++++
 3 files changed, 486 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index ace6bc2..31353ed 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -61,6 +61,34 @@ static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	0x0U, 0x0U,
 };
 
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA224_H7, SHA224_H6,
+	SHA224_H5, SHA224_H4,
+	SHA224_H3, SHA224_H2,
+	SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA256_H7, SHA256_H6,
+	SHA256_H5, SHA256_H4,
+	SHA256_H3, SHA256_H2,
+	SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA384_H7, SHA384_H6,
+	SHA384_H5, SHA384_H4,
+	SHA384_H3, SHA384_H2,
+	SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA512_H7, SHA512_H6,
+	SHA512_H5, SHA512_H4,
+	SHA512_H3, SHA512_H2,
+	SHA512_H1, SHA512_H0,
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -97,6 +125,54 @@ static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA224_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA256_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA384_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA512_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
 static int generate_partial_hash(struct ccp_session *sess)
 {
 
@@ -104,11 +180,13 @@ static int generate_partial_hash(struct ccp_session *sess)
 	uint8_t	opad[sess->auth.block_size];
 	uint8_t *ipad_t, *opad_t;
 	uint32_t *hash_value_be32, hash_temp32[8];
+	uint64_t *hash_value_be64, hash_temp64[8];
 	int i, count;
 
 	opad_t = ipad_t = (uint8_t *)sess->auth.key;
 
 	hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+	hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
 
 	/* the key buffer is zero-padded to the block size of the algorithm */
 	for (i = 0; i < sess->auth.block_size; i++) {
@@ -132,6 +210,66 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
 	default:
 		CCP_LOG_ERR("Invalid auth algo");
 		return -1;
@@ -347,6 +485,107 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx = (void *)ccp_sha224_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx = (void *)ccp_sha256_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx = (void *)ccp_sha384_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx = (void *)ccp_sha512_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -541,12 +780,32 @@ ccp_auth_slot(struct ccp_session *session)
 
 	switch (session->auth.algo) {
 	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
 		count = 3;
 		/**< op + lsb passthrough copy to/from */
 		break;
 	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
 		count = 6;
 		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = 7;
+		/**
+		 * 1. Load PHash1 = H(k ^ ipad) to LSB
+		 * 2. generate IHash = H(message with PHash1
+		 * as init values);
+		 * 3. Retrieve IHash (2 slots for 384/512)
+		 * 4. Load PHash2 = H(k ^ opad) to LSB
+		 * 5. generate FHash = H(IHash with PHash2
+		 * as init values);
+		 * 6. Retrieve HMAC output from LSB to host memory
+		 */
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -1508,13 +1767,24 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 
 	switch (session->auth.algo) {
 	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
 		result = ccp_perform_sha(op, cmd_q);
 		b_info->desccnt += 3;
 		break;
 	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 6;
 		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 7;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 42179de..ca1c1a8 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -78,6 +78,18 @@
 #define SHA1_DIGEST_SIZE        20
 #define SHA1_BLOCK_SIZE         64
 
+#define SHA224_DIGEST_SIZE      28
+#define SHA224_BLOCK_SIZE       64
+
+#define SHA256_DIGEST_SIZE      32
+#define SHA256_BLOCK_SIZE       64
+
+#define SHA384_DIGEST_SIZE      48
+#define SHA384_BLOCK_SIZE       128
+
+#define SHA512_DIGEST_SIZE      64
+#define SHA512_BLOCK_SIZE       128
+
 /* SHA LSB initialization values */
 
 #define SHA1_H0		0x67452301UL
@@ -86,6 +98,42 @@
 #define SHA1_H3		0x10325476UL
 #define SHA1_H4		0xc3d2e1f0UL
 
+#define SHA224_H0	0xc1059ed8UL
+#define SHA224_H1	0x367cd507UL
+#define SHA224_H2	0x3070dd17UL
+#define SHA224_H3	0xf70e5939UL
+#define SHA224_H4	0xffc00b31UL
+#define SHA224_H5	0x68581511UL
+#define SHA224_H6	0x64f98fa7UL
+#define SHA224_H7	0xbefa4fa4UL
+
+#define SHA256_H0	0x6a09e667UL
+#define SHA256_H1	0xbb67ae85UL
+#define SHA256_H2	0x3c6ef372UL
+#define SHA256_H3	0xa54ff53aUL
+#define SHA256_H4	0x510e527fUL
+#define SHA256_H5	0x9b05688cUL
+#define SHA256_H6	0x1f83d9abUL
+#define SHA256_H7	0x5be0cd19UL
+
+#define SHA384_H0	0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1	0x629a292a367cd507ULL
+#define SHA384_H2	0x9159015a3070dd17ULL
+#define SHA384_H3	0x152fecd8f70e5939ULL
+#define SHA384_H4	0x67332667ffc00b31ULL
+#define SHA384_H5	0x8eb44a8768581511ULL
+#define SHA384_H6	0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7	0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0	0x6a09e667f3bcc908ULL
+#define SHA512_H1	0xbb67ae8584caa73bULL
+#define SHA512_H2	0x3c6ef372fe94f82bULL
+#define SHA512_H3	0xa54ff53a5f1d36f1ULL
+#define SHA512_H4	0x510e527fade682d1ULL
+#define SHA512_H5	0x9b05688c2b3e6c1fULL
+#define SHA512_H6	0x1f83d9abfb41bd6bULL
+#define SHA512_H7	0x5be0cd19137e2179ULL
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 6adef1c..ab6199f 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -81,6 +81,174 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/*AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
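
One non-obvious detail in the tables above: ccp_sha224_init and ccp_sha256_init list H7 first and H0 last, and the HMAC precompute copies hash_temp32[] back to front, because the CCP LSB stores the SHA state with its words reversed relative to the FIPS 180 order. A small sketch of that reversal (the helper name is hypothetical, not part of the PMD):

#include <stdint.h>

/* Reverse a digest's 32-bit words into the order the CCP LSB expects;
 * for SHA-256, count is SHA256_DIGEST_SIZE / 4 == 8. */
static void digest_to_lsb_order32(uint32_t *dst, const uint32_t *src,
				  unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dst[i] = src[count - 1 - i];
}

The 384/512 variants do the same with 64-bit words, which is also why they occupy two 32-byte LSB slots (ctx_len = CCP_SB_BYTES << 1) and need an extra passthrough descriptor on retrieval.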

* [PATCH v3 16/19] crypto/ccp: support sha3 family authentication algo
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (13 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 15/19] crypto/ccp: support sha2 family " Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 17/19] crypto/ccp: support cpu based md5 and sha2 " Ravi Kumar
                       ` (4 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c       | 667 +++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h       |  22 ++
 drivers/crypto/ccp/ccp_pmd_ops.c      | 168 +++++++++
 lib/librte_cryptodev/rte_crypto_sym.h |  17 +
 4 files changed, 873 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 31353ed..1290cdd 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -89,6 +89,74 @@ uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
 	SHA512_H1, SHA512_H0,
 };
 
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+	(((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+	uint64_t saved;
+	/**
+	 * The portion of the input message that we
+	 * didn't consume yet
+	 */
+	union {
+		uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+		/* Keccak's state */
+		uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+		/** total ctx size: 200 bytes */
+	};
+	unsigned int byteIndex;
+	/**
+	 * 0..7--the next byte after the set one
+	 * (starts from 0; 0--none are buffered)
+	 */
+	unsigned int wordIndex;
+	/**
+	 * 0..24--the next word to integrate input
+	 * (starts from 0)
+	 */
+	unsigned int capacityWords;
+	/**
+	 * the double size of the hash output in
+	 * words (e.g. 16 for Keccak 512)
+	 */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+	(((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+	SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+	SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+	SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+	SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+	SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+	SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+	SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+	SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+	SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+	SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+	1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+	18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+	10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+	14, 22, 9, 6, 1
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -173,6 +241,223 @@ static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
+static void
+keccakf(uint64_t s[25])
+{
+	int i, j, round;
+	uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+	for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+		/* Theta */
+		for (i = 0; i < 5; i++)
+			bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+				s[i + 20];
+
+		for (i = 0; i < 5; i++) {
+			t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+			for (j = 0; j < 25; j += 5)
+				s[j + i] ^= t;
+		}
+
+		/* Rho Pi */
+		t = s[1];
+		for (i = 0; i < 24; i++) {
+			j = keccakf_piln[i];
+			bc[0] = s[j];
+			s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+			t = bc[0];
+		}
+
+		/* Chi */
+		for (j = 0; j < 25; j += 5) {
+			for (i = 0; i < 5; i++)
+				bc[i] = s[j + i];
+			for (i = 0; i < 5; i++)
+				s[j + i] ^= (~bc[(i + 1) % 5]) &
+					    bc[(i + 2) % 5];
+		}
+
+		/* Iota */
+		s[0] ^= keccakf_rndc[round];
+	}
+}
+
+static void
+sha3_Init224(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+
+/* Absorb message bytes into the Keccak sponge: partial words are
+ * buffered in ctx->saved, each complete 64-bit word is XORed into the
+ * state, and keccakf() runs whenever a full rate block is absorbed.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+	unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+	size_t words;
+	unsigned int tail;
+	size_t i;
+	const uint8_t *buf = bufIn;
+
+	if (len < old_tail) {
+		while (len--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+		return;
+	}
+
+	if (old_tail) {
+		len -= old_tail;
+		while (old_tail--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+
+		ctx->s[ctx->wordIndex] ^= ctx->saved;
+		ctx->byteIndex = 0;
+		ctx->saved = 0;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	words = len / sizeof(uint64_t);
+	tail = len - words * sizeof(uint64_t);
+
+	for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+		const uint64_t t = (uint64_t) (buf[0]) |
+			((uint64_t) (buf[1]) << 8 * 1) |
+			((uint64_t) (buf[2]) << 8 * 2) |
+			((uint64_t) (buf[3]) << 8 * 3) |
+			((uint64_t) (buf[4]) << 8 * 4) |
+			((uint64_t) (buf[5]) << 8 * 5) |
+			((uint64_t) (buf[6]) << 8 * 6) |
+			((uint64_t) (buf[7]) << 8 * 7);
+		ctx->s[ctx->wordIndex] ^= t;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	while (tail--)
+		ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
+
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init224(ctx);
+	sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init256(ctx);
+	sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init384(ctx);
+	sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init512(ctx);
+	sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
 static int generate_partial_hash(struct ccp_session *sess)
 {
 
@@ -182,6 +467,7 @@ static int generate_partial_hash(struct ccp_session *sess)
 	uint32_t *hash_value_be32, hash_temp32[8];
 	uint64_t *hash_value_be64, hash_temp64[8];
 	int i, count;
+	uint8_t *hash_value_sha3;
 
 	opad_t = ipad_t = (uint8_t *)sess->auth.key;
 
@@ -225,6 +511,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_224(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha3_224(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA256_HMAC:
 		count = SHA256_DIGEST_SIZE >> 2;
 
@@ -240,6 +536,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_256(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_256(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 		count = SHA512_DIGEST_SIZE >> 3;
 
@@ -255,6 +561,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be64++)
 			*hash_value_be64 = hash_temp64[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_384(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_384(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA512_HMAC:
 		count = SHA512_DIGEST_SIZE >> 3;
 
@@ -270,6 +586,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be64++)
 			*hash_value_be64 = hash_temp64[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_512(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_512(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	default:
 		CCP_LOG_ERR("Invalid auth algo");
 		return -1;
@@ -510,6 +836,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+		if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA256:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -535,6 +885,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+		if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA384:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -560,6 +934,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+		if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA512:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -585,7 +983,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
-
+	case RTE_CRYPTO_AUTH_SHA3_512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+		if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -806,6 +1227,26 @@ ccp_auth_slot(struct ccp_session *session)
 		 * 6. Retrieve HMAC output from LSB to host memory
 		 */
 		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		count = 1;
+		/**< single op; ctx and dst in host memory */
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		count = 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		count = 4;
+		/**
+		 * 1. Op to perform IHash
+		 * 2. Retrieve IHash from LSB (2 descriptors for 384/512)
+		 * 3. Perform final hash
+		 */
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -1196,6 +1637,213 @@ ccp_perform_sha(struct rte_crypto_op *op,
 }
 
 static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+		      struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	struct ccp_passthru pst;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
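+	/* second half of the appended area stages the intermediate hash */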
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+						   *)session->auth.pre_compute);
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* desc1 for SHA3 Ihash operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+	CCP_CMD_DST_HI(desc) = 0;
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Retrieve the intermediate hash value from the LSB */
+	if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
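+		/* the wider state spans two SB slots; retrieve both halves */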
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	/* SHA engine command descriptor for the final hash; the key
+	 * field points at the second (opad) partial hash in pre_compute
+	 */
+	ctx_paddr += CCP_SHA3_CTX_SIZE;
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+		dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+		CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+		dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+	} else {
+		CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+	}
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *ctx_addr, *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
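+	/* initial SHA3 context comes from the per-session sha3_ctx buffer */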
+	ctx_addr = session->auth.sha3_ctx;
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for SHA3 operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
 ccp_perform_aes_cmac(struct rte_crypto_op *op,
 		     struct ccp_queue *cmd_q)
 {
@@ -1785,6 +2433,23 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 7;
 		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		result = ccp_perform_sha3(op, cmd_q);
+		b_info->desccnt += 1;
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index ca1c1a8..8459b71 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -80,15 +80,19 @@
 
 #define SHA224_DIGEST_SIZE      28
 #define SHA224_BLOCK_SIZE       64
+#define SHA3_224_BLOCK_SIZE     144
 
 #define SHA256_DIGEST_SIZE      32
 #define SHA256_BLOCK_SIZE       64
+#define SHA3_256_BLOCK_SIZE     136
 
 #define SHA384_DIGEST_SIZE      48
 #define SHA384_BLOCK_SIZE       128
+#define SHA3_384_BLOCK_SIZE     104
 
 #define SHA512_DIGEST_SIZE      64
 #define SHA512_BLOCK_SIZE       128
+#define SHA3_512_BLOCK_SIZE     72
 
 /* SHA LSB initialization values */
 
@@ -386,4 +390,22 @@ int process_ops_to_dequeue(struct ccp_qp *qp,
 			   struct rte_crypto_op **op,
 			   uint16_t nb_ops);
 
+
+/**
+ * APIs for SHA3 partial hash generation.
+ * @param data_in buffer over which the partial hash is computed
+ * @param data_out buffer receiving the partial hash result in CCP
+ *	big-endian format
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+			  uint8_t *data_out);
+
 #endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index ab6199f..bb59d15 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -123,6 +123,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 1,
+					 .max = 144,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA256 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -165,6 +207,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 1,
+					 .max = 136,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA384 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -207,6 +291,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 1,
+					 .max = 104,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA512  */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -249,6 +375,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-512 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 1,
+					 .max = 72,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
 	{	/*AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 60797e9..eb5afc5 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -245,6 +245,23 @@ enum rte_crypto_auth_algorithm {
 	RTE_CRYPTO_AUTH_ZUC_EIA3,
 	/**< ZUC algorithm in EIA3 mode */
 
+	RTE_CRYPTO_AUTH_SHA3_224,
+	/**< 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	/**< HMAC using 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256,
+	/**< 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	/**< HMAC using 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384,
+	/**< 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	/**< HMAC using 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512,
+	/**< 512 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+	/**< HMAC using 512 bit SHA3 algorithm. */
+
 	RTE_CRYPTO_AUTH_LIST_END
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v3 17/19] crypto/ccp: support cpu based md5 and sha2 family authentication algo
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (14 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 16/19] crypto/ccp: support sha3 " Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 18/19] test/crypto: add test for AMD CCP crypto poll mode driver Ravi Kumar
                       ` (3 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Auth operations can be performed on the CPU, without offloading
to the CCP engine, when CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH is
enabled in the DPDK configuration.
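
For reference, the CPU path added below boils down to the standard
OpenSSL EVP "DigestSign" HMAC sequence. A minimal stand-alone sketch
of that sequence follows; the helper name and the fixed SHA-256
digest choice are illustrative only, not part of this patch:

    #include <stdint.h>
    #include <openssl/evp.h>

    /* Compute HMAC-SHA256 of msg with key; returns 0 on success.
     * On entry *dlen holds the digest buffer size; on return it
     * holds the number of digest bytes written.
     */
    static int
    hmac_sha256(const uint8_t *key, int klen,
                const uint8_t *msg, size_t mlen,
                uint8_t *digest, size_t *dlen)
    {
        EVP_PKEY *pkey;
        EVP_MD_CTX *ctx;
        int ret = -1;

        pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, key, klen);
        ctx = EVP_MD_CTX_create();
        if (pkey == NULL || ctx == NULL)
            goto out;
        if (EVP_DigestSignInit(ctx, NULL, EVP_sha256(), NULL, pkey) <= 0)
            goto out;
        if (EVP_DigestSignUpdate(ctx, msg, mlen) <= 0)
            goto out;
        if (EVP_DigestSignFinal(ctx, digest, dlen) <= 0)
            goto out;
        ret = 0;
    out:
        if (ctx != NULL)
            EVP_MD_CTX_destroy(ctx);
        EVP_PKEY_free(pkey);
        return ret;
    }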

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 config/common_base                   |   1 +
 drivers/crypto/ccp/ccp_crypto.c      | 282 ++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h      |   5 +-
 drivers/crypto/ccp/ccp_pmd_ops.c     |  23 +++
 drivers/crypto/ccp/ccp_pmd_private.h |  10 ++
 5 files changed, 316 insertions(+), 5 deletions(-)

diff --git a/config/common_base b/config/common_base
index 88826c8..2974581 100644
--- a/config/common_base
+++ b/config/common_base
@@ -560,6 +560,7 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 # Compile PMD for AMD CCP crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_CCP=n
+CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
 
 #
 # Compile PMD for Marvell Crypto device
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 1290cdd..f916055 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -53,6 +53,12 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+#endif
+
 /* SHA initial context values */
 static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	SHA1_H4, SHA1_H3,
@@ -786,6 +792,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - MD5_DIGEST_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		sess->auth.block_size = MD5_BLOCK_SIZE;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+#endif
 	case RTE_CRYPTO_AUTH_SHA1:
 		sess->auth.engine = CCP_ENGINE_SHA;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
@@ -795,6 +812,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
 			return -1;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -810,6 +838,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA224:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
@@ -820,6 +849,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
@@ -835,6 +875,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_224:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
@@ -869,6 +910,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
@@ -884,6 +936,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_256:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
@@ -918,6 +971,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
@@ -933,6 +997,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_384:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
@@ -967,6 +1032,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
@@ -982,6 +1058,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_512:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
@@ -1012,7 +1089,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.engine = CCP_ENGINE_AES;
 		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
 		sess->auth.key_length = auth_xform->key.length;
-		/**<padding and hash result*/
+		/* padding and hash result */
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = AES_BLOCK_SIZE;
 		sess->auth.block_size = AES_BLOCK_SIZE;
@@ -1208,14 +1285,22 @@ ccp_auth_slot(struct ccp_session *session)
 		count = 3;
 		/**< op + lsb passthrough copy to/from */
 		break;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		break;
+#endif
 	case CCP_AUTH_ALGO_SHA1_HMAC:
 	case CCP_AUTH_ALGO_SHA224_HMAC:
 	case CCP_AUTH_ALGO_SHA256_HMAC:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 		count = 6;
+#endif
 		break;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 	case CCP_AUTH_ALGO_SHA512_HMAC:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 		count = 7;
+#endif
 		/**
 		 * 1. Load PHash1 = H(k ^ ipad); to LSB
 		 * 2. generate IHash = H(hash on message with PHash1
@@ -1322,6 +1407,122 @@ ccp_compute_slot_count(struct ccp_session *session)
 	return count;
 }
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+static int
+algo_select(int sessalgo,
+	    const EVP_MD **algo)
+{
+	int res = 0;
+
+	switch (sessalgo) {
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		*algo = EVP_md5();
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		*algo = EVP_sha1();
+		break;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		*algo = EVP_sha224();
+		break;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		*algo = EVP_sha256();
+		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		*algo = EVP_sha384();
+		break;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		*algo = EVP_sha512();
+		break;
+	default:
+		res = -EINVAL;
+		break;
+	}
+	return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+		      __rte_unused uint8_t *iv,
+		      EVP_PKEY *pkey,
+		      int srclen,
+		      EVP_MD_CTX *ctx,
+		      const EVP_MD *algo,
+		      uint16_t d_len)
+{
+	unsigned char temp_dst[DIGEST_LENGTH_MAX];
+	size_t dstlen = sizeof(temp_dst);
+
+	if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+		goto process_auth_err;
+
+	memcpy(dst, temp_dst, d_len);
+	return 0;
+process_auth_err:
+	CCP_LOG_ERR("Process cpu auth failed");
+	return -EINVAL;
+}
+
+static int cpu_crypto_auth(struct ccp_qp *qp,
+			   struct rte_crypto_op *op,
+			   struct ccp_session *sess,
+			   EVP_MD_CTX *ctx)
+{
+	uint8_t *src, *dst;
+	int srclen, status;
+	struct rte_mbuf *mbuf_src, *mbuf_dst;
+	const EVP_MD *algo = NULL;
+	EVP_PKEY *pkey;
+
+	algo_select(sess->auth.algo, &algo);
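+	/* wrap the session auth key as an EVP HMAC key */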
+	pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+				    sess->auth.key_length);
+	mbuf_src = op->sym->m_src;
+	mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+	srclen = op->sym->auth.data.length;
+	src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+				      op->sym->auth.data.offset);
+
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
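+		/* verify: compute into scratch, compare against user digest */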
+		dst = qp->temp_digest;
+	} else {
+		dst = op->sym->auth.digest.data;
+		if (dst == NULL) {
+			dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+						     op->sym->auth.data.offset +
+						     sess->auth.digest_length);
+		}
+	}
+	status = process_cpu_auth_hmac(src, dst, NULL,
+				       pkey, srclen,
+				       ctx,
+				       algo,
+				       sess->auth.digest_length);
+	if (status) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return status;
+	}
+
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(dst, op->sym->auth.digest.data,
+			   sess->auth.digest_length) != 0) {
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+		} else {
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+	} else {
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	}
+	EVP_PKEY_free(pkey);
+	return 0;
+}
+#endif
+
 static void
 ccp_perform_passthru(struct ccp_passthru *pst,
 		     struct ccp_queue *cmd_q)
@@ -2422,14 +2623,24 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 		result = ccp_perform_sha(op, cmd_q);
 		b_info->desccnt += 3;
 		break;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		break;
+#endif
 	case CCP_AUTH_ALGO_SHA1_HMAC:
 	case CCP_AUTH_ALGO_SHA224_HMAC:
 	case CCP_AUTH_ALGO_SHA256_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
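+		/* handled by the CPU auth path at enqueue */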
+		break;
+#endif
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 6;
 		break;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 	case CCP_AUTH_ALGO_SHA512_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		break;
+#endif
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 7;
 		break;
@@ -2493,7 +2704,7 @@ ccp_crypto_aead(struct rte_crypto_op *op,
 }
 
 int
-process_ops_to_enqueue(const struct ccp_qp *qp,
+process_ops_to_enqueue(struct ccp_qp *qp,
 		       struct rte_crypto_op **op,
 		       struct ccp_queue *cmd_q,
 		       uint16_t nb_ops,
@@ -2502,11 +2713,22 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 	int i, result = 0;
 	struct ccp_batch_info *b_info;
 	struct ccp_session *session;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+#endif
 
 	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
 		CCP_LOG_ERR("batch info allocation failed");
 		return 0;
 	}
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		return 0;
+	}
+	b_info->auth_ctr = 0;
+#endif
 	/* populate batch info necessary for dequeue */
 	b_info->op_idx = 0;
 	b_info->lsb_buf_idx = 0;
@@ -2528,6 +2750,11 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 			break;
 		case CCP_CMD_AUTH:
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			b_info->auth_ctr++;
+			result = cpu_crypto_auth(qp, op[i],
+						 session, auth_ctx);
+#endif
 			break;
 		case CCP_CMD_CIPHER_HASH:
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -2537,6 +2764,12 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 			break;
 		case CCP_CMD_HASH_CIPHER:
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			result = cpu_crypto_auth(qp, op[i],
+						 session, auth_ctx);
+			if (op[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+				continue;
+#endif
 			if (result)
 				break;
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -2570,6 +2803,9 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 
 	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
 	return i;
 }
 
@@ -2638,13 +2874,27 @@ static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
 }
 
 static int
-ccp_prepare_ops(struct rte_crypto_op **op_d,
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ccp_prepare_ops(struct ccp_qp *qp,
+#else
+ccp_prepare_ops(struct ccp_qp *qp __rte_unused,
+#endif
+		struct rte_crypto_op **op_d,
 		struct ccp_batch_info *b_info,
 		uint16_t nb_ops)
 {
 	int i, min_ops;
 	struct ccp_session *session;
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		return 0;
+	}
+#endif
 	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
 
 	for (i = 0; i < min_ops; i++) {
@@ -2657,8 +2907,25 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 			break;
 		case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_CIPHER_HASH:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			cpu_crypto_auth(qp, op_d[i],
+					session, auth_ctx);
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_HASH_CIPHER:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_COMBINED:
 			ccp_auth_dq_prepare(op_d[i]);
 			break;
@@ -2667,6 +2934,9 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 		}
 	}
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
 	b_info->opcnt -= min_ops;
 	return min_ops;
 }
@@ -2686,6 +2956,10 @@ process_ops_to_dequeue(struct ccp_qp *qp,
 	} else if (rte_ring_dequeue(qp->processed_pkts,
 				    (void **)&b_info))
 		return 0;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	if (b_info->auth_ctr == b_info->opcnt)
+		goto success;
+#endif
 	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
 				       CMD_Q_HEAD_LO_BASE);
 
@@ -2705,7 +2979,7 @@ process_ops_to_dequeue(struct ccp_qp *qp,
 
 
 success:
-	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
+	nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
 	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
 	b_info->desccnt = 0;
 	if (b_info->opcnt > 0) {
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 8459b71..f526329 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -94,6 +94,9 @@
 #define SHA512_BLOCK_SIZE       128
 #define SHA3_512_BLOCK_SIZE     72
 
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX	64
+
 /* SHA LSB initialization values */
 
 #define SHA1_H0		0x67452301UL
@@ -372,7 +375,7 @@ int ccp_compute_slot_count(struct ccp_session *session);
  * @param nb_ops No. of ops to be submitted
  * @return 0 on success otherwise -1
  */
-int process_ops_to_enqueue(const struct ccp_qp *qp,
+int process_ops_to_enqueue(struct ccp_qp *qp,
 			   struct rte_crypto_op **op,
 			   struct ccp_queue *cmd_q,
 			   uint16_t nb_ops,
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index bb59d15..1b67070 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,29 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
+#endif
 	{	/* SHA1 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
index d2283e8..9136f61 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -32,6 +32,7 @@
 #define _CCP_PMD_PRIVATE_H_
 
 #include <rte_cryptodev.h>
+#include "ccp_crypto.h"
 
 #define CRYPTODEV_NAME_CCP_PMD crypto_ccp
 
@@ -87,6 +88,10 @@ struct ccp_batch_info {
 	phys_addr_t lsb_buf_phys;
 	/**< LSB intermediate buf for passthru */
 	int lsb_buf_idx;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	uint16_t auth_ctr;
+	/**< auth only ops batch */
+#endif
 } __rte_cache_aligned;
 
 /**< CCP crypto queue pair */
@@ -107,6 +112,11 @@ struct ccp_qp {
 	/**< Store ops pulled out of queue */
 	struct rte_cryptodev *dev;
 	/**< rte crypto device to which this qp belongs */
+	uint8_t temp_digest[DIGEST_LENGTH_MAX];
+	/**< Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
 } __rte_cache_aligned;
 
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v3 18/19] test/crypto: add test for AMD CCP crypto poll mode driver
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (15 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 17/19] crypto/ccp: support cpu based md5 and sha2 " Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:42     ` [PATCH v3 19/19] doc: add document " Ravi Kumar
                       ` (2 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 test/test/test_cryptodev.c                   | 161 +++++++++++++++++++++++++++
 test/test/test_cryptodev.h                   |   1 +
 test/test/test_cryptodev_aes_test_vectors.h  |  93 ++++++++++------
 test/test/test_cryptodev_blockcipher.c       |   9 +-
 test/test/test_cryptodev_blockcipher.h       |   1 +
 test/test/test_cryptodev_des_test_vectors.h  |  42 ++++---
 test/test/test_cryptodev_hash_test_vectors.h |  60 ++++++----
 7 files changed, 301 insertions(+), 66 deletions(-)

diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 4d7e5b1..17df676 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -336,6 +336,23 @@ testsuite_setup(void)
 		}
 	}
 
+	/* Create a CCP device if required */
+	if (gbl_driver_id == rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD))) {
+		nb_devs = rte_cryptodev_device_count_by_driver(
+				rte_cryptodev_driver_id_get(
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD)));
+		if (nb_devs < 1) {
+			ret = rte_vdev_init(
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD),
+				NULL);
+
+			TEST_ASSERT(ret == 0, "Failed to create "
+				"instance of pmd : %s",
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+		}
+	}
+
 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
 	if (gbl_driver_id == rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
@@ -1725,6 +1742,44 @@ test_AES_cipheronly_openssl_all(void)
 }
 
 static int
+test_AES_chain_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AES_CHAIN_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_AES_cipheronly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_qat_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1896,6 +1951,25 @@ test_authonly_openssl_all(void)
 }
 
 static int
+test_authonly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AUTHONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_armv8_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -4971,6 +5045,44 @@ test_3DES_cipheronly_dpaa2_sec_all(void)
 }
 
 static int
+test_3DES_chain_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_3DES_CHAIN_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_3DES_cipheronly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_3DES_cipheronly_qat_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -9552,6 +9664,38 @@ static struct unit_test_suite cryptodev_mrvl_testsuite  = {
 	}
 };
 
+static struct unit_test_suite cryptodev_ccp_testsuite  = {
+	.suite_name = "Crypto Device CCP Unit Test Suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		TEST_CASE_ST(ut_setup, ut_teardown, test_multi_session),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_multi_session_random_usage),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_AES_chain_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_AES_cipheronly_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_3DES_chain_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_3DES_cipheronly_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_authonly_ccp_all),
+
+		/** Negative tests */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			authentication_verify_HMAC_SHA1_fail_data_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			authentication_verify_HMAC_SHA1_fail_tag_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			auth_decryption_AES128CBC_HMAC_SHA1_fail_data_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
 
 static int
 test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
@@ -9773,6 +9917,22 @@ test_cryptodev_dpaa_sec(void /*argv __rte_unused, int argc __rte_unused*/)
 	return unit_test_suite_runner(&cryptodev_dpaa_sec_testsuite);
 }
 
+static int
+test_cryptodev_ccp(void)
+{
+	gbl_driver_id = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+
+	if (gbl_driver_id == -1) {
+		RTE_LOG(ERR, USER1, "CCP PMD must be loaded. Check if "
+				"CONFIG_RTE_LIBRTE_PMD_CCP is enabled "
+				"in the config file to run this testsuite.\n");
+		return TEST_FAILED;
+	}
+
+	return unit_test_suite_runner(&cryptodev_ccp_testsuite);
+}
+
 REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
 REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
 REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
@@ -9785,3 +9945,4 @@ REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8);
 REGISTER_TEST_COMMAND(cryptodev_sw_mrvl_autotest, test_cryptodev_mrvl);
 REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec);
 REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec);
+REGISTER_TEST_COMMAND(cryptodev_ccp_autotest, test_cryptodev_ccp);
diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
index 8cdc087..d45fb7b 100644
--- a/test/test/test_cryptodev.h
+++ b/test/test/test_cryptodev.h
@@ -61,6 +61,7 @@
 #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
 #define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
 #define CRYPTODEV_NAME_MRVL_PMD		crypto_mrvl
+#define CRYPTODEV_NAME_CCP_PMD		crypto_ccp
 
 /**
  * Write (spread) data from buffer to mbuf data
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 3fc3c2b..50b9943 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1171,7 +1171,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1184,7 +1185,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1223,7 +1225,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1236,7 +1239,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1249,7 +1253,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1283,7 +1288,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1304,7 +1310,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
@@ -1326,7 +1333,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1346,7 +1354,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1355,7 +1364,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1379,7 +1389,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1442,7 +1453,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1454,7 +1466,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1466,7 +1479,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1479,7 +1493,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1488,7 +1503,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr =
@@ -1498,7 +1514,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
@@ -1513,7 +1530,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC Decryption",
@@ -1525,7 +1543,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption",
@@ -1536,7 +1555,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption Scatter gather",
@@ -1555,7 +1575,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC Encryption",
@@ -1567,7 +1588,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC Decryption",
@@ -1579,7 +1601,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Encryption",
@@ -1589,7 +1612,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Decryption",
@@ -1599,7 +1623,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Encryption",
@@ -1611,7 +1636,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Decryption",
@@ -1623,7 +1649,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR Encryption",
@@ -1634,7 +1661,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR Decryption",
@@ -1645,7 +1673,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR Encryption",
@@ -1657,7 +1686,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR Decryption",
@@ -1669,7 +1699,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Encryption (12-byte IV)",
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index c682e4d..38cc5d3 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -54,6 +54,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 
 	int openssl_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+	int ccp_pmd = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
 	int scheduler_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
 	int armv8_pmd = rte_cryptodev_driver_id_get(
@@ -94,7 +96,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 			driver_id == qat_pmd ||
 			driver_id == openssl_pmd ||
 			driver_id == armv8_pmd ||
-			driver_id == mrvl_pmd) { /* Fall through */
+			driver_id == mrvl_pmd ||
+			driver_id == ccp_pmd) { /* Fall through */
 		digest_len = tdata->digest.len;
 	} else if (driver_id == aesni_mb_pmd ||
 			driver_id == scheduler_pmd) {
@@ -555,6 +558,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
 
 	int openssl_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+	int ccp_pmd = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
 	int dpaa2_sec_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
 	int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
@@ -627,6 +632,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER;
 	else if (driver_id == dpaa2_sec_pmd)
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC;
+	else if (driver_id == ccp_pmd)
+		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_CCP;
 	else if (driver_id == dpaa_sec_pmd)
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC;
 	else if (driver_id == mrvl_pmd)
diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
index edbdaab..93ef0ae 100644
--- a/test/test/test_cryptodev_blockcipher.h
+++ b/test/test/test_cryptodev_blockcipher.h
@@ -27,6 +27,7 @@
 #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC	0x0020 /* DPAA2_SEC flag */
 #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC	0x0040 /* DPAA_SEC flag */
 #define BLOCKCIPHER_TEST_TARGET_PMD_MRVL	0x0080 /* Marvell flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_CCP		0x0100 /* CCP flag */
 
 #define BLOCKCIPHER_TEST_OP_CIPHER	(BLOCKCIPHER_TEST_OP_ENCRYPT | \
 					BLOCKCIPHER_TEST_OP_DECRYPT)
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index 0be809e..a30317c 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -1044,7 +1044,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1053,19 +1054,22 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
 		.test_data = &triple_des128cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Decryption Digest Verify",
 		.test_data = &triple_des128cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Encryption Digest",
@@ -1075,7 +1079,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1085,21 +1090,24 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
 		.test_data = &triple_des192cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Decryption Digest Verify",
 		.test_data = &triple_des192cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CTR HMAC-SHA1 Encryption Digest",
@@ -1180,7 +1188,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr =
@@ -1189,7 +1198,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
@@ -1201,7 +1211,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC Decryption",
@@ -1210,7 +1221,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC Encryption",
@@ -1220,7 +1232,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC Decryption",
@@ -1230,7 +1243,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 2215b86..807954d 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -358,13 +358,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.test_descr = "SHA1 Digest",
 		.test_data = &sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA1 Digest Verify",
 		.test_data = &sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest",
@@ -375,7 +377,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Verify",
@@ -386,19 +389,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA224 Digest",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA224 Digest Verify",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest",
@@ -409,7 +415,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest Verify",
@@ -420,19 +427,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA256 Digest",
 		.test_data = &sha256_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA256 Digest Verify",
 		.test_data = &sha256_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest",
@@ -443,7 +453,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest Verify",
@@ -454,19 +465,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA384 Digest",
 		.test_data = &sha384_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA384 Digest Verify",
 		.test_data = &sha384_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest",
@@ -477,7 +491,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest Verify",
@@ -488,19 +503,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA512 Digest",
 		.test_data = &sha512_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA512 Digest Verify",
 		.test_data = &sha512_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest",
@@ -511,7 +529,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest Verify",
@@ -522,7 +541,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v3 19/19] doc: add document for AMD CCP crypto poll mode driver
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (16 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 18/19] test/crypto: add test for AMD CCP crypto poll mode driver Ravi Kumar
@ 2018-01-10  9:42     ` Ravi Kumar
  2018-01-10  9:53     ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD De Lara Guarch, Pablo
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-01-10  9:42 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 doc/guides/cryptodevs/ccp.rst              | 127 +++++++++++++++++++++++++++++
 doc/guides/cryptodevs/features/ccp.ini     |  57 +++++++++++++
 doc/guides/cryptodevs/features/default.ini |  12 +++
 doc/guides/cryptodevs/index.rst            |   1 +
 4 files changed, 197 insertions(+)
 create mode 100644 doc/guides/cryptodevs/ccp.rst
 create mode 100644 doc/guides/cryptodevs/features/ccp.ini

diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
new file mode 100644
index 0000000..59b9281
--- /dev/null
+++ b/doc/guides/cryptodevs/ccp.rst
@@ -0,0 +1,127 @@
+.. Copyright(c) 2017 Advanced Micro Devices, Inc.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+   * Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+AMD CCP Poll Mode Driver
+========================
+
+This code provides the initial implementation of the ccp poll mode driver.
+The CCP poll mode driver library (librte_pmd_ccp) implements support for
+AMD’s cryptographic co-processor (CCP). The CCP PMD is a virtual crypto
+poll mode driver which schedules crypto operations to one or more available
+CCP hardware engines on the platform. The CCP PMD provides poll mode crypto
+driver support for the following hardware accelerator devices::
+
+	AMD Cryptographic Co-processor (0x1456)
+	AMD Cryptographic Co-processor (0x1468)
+
+Features
+--------
+
+The CCP crypto PMD has support for:
+
+Cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES_ECB``
+* ``RTE_CRYPTO_CIPHER_AES_CTR``
+* ``RTE_CRYPTO_CIPHER_3DES_CBC``
+
+Hash algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1``
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA384``
+* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA512``
+* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+* ``RTE_CRYPTO_AUTH_AES_CMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_224``
+* ``RTE_CRYPTO_AUTH_SHA3_224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_256``
+* ``RTE_CRYPTO_AUTH_SHA3_256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_384``
+* ``RTE_CRYPTO_AUTH_SHA3_384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_512``
+* ``RTE_CRYPTO_AUTH_SHA3_512_HMAC``
+
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
+Installation
+------------
+
+To compile the CCP PMD, it has to be enabled in the ``config/common_base`` file:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP=y``
+
+The CCP PMD also supports computing authentication on the CPU with the cipher
+offloaded to the CCP. To enable this feature, enable the following option in
+the configuration:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y``
+
+This code was verified on Ubuntu 16.04.
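+
+For example, with the legacy make-based build the option can be enabled as
+follows (a sketch, assuming the default ``x86_64-native-linuxapp-gcc`` target)::
+
+	make config T=x86_64-native-linuxapp-gcc
+	sed -i 's/CONFIG_RTE_LIBRTE_PMD_CCP=n/CONFIG_RTE_LIBRTE_PMD_CCP=y/' build/.config
+	make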
+
+Initialization
+--------------
+
+Bind the CCP devices to the DPDK UIO driver module before running the CCP PMD,
+e.g. for the 0x1456 device::
+
+	cd to the top-level DPDK directory
+	modprobe uio
+	insmod ./build/kmod/igb_uio.ko
+	echo "1022 1456" > /sys/bus/pci/drivers/igb_uio/new_id
+
+Another way to bind the CCP devices to the DPDK UIO driver is by using the
+``dpdk-devbind.py`` script. The following command assumes a device BDF of
+``0000:09:00.2``::
+
+	cd to the top-level DPDK directory
+	./usertools/dpdk-devbind.py -b igb_uio 0000:09:00.2
+
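+The binding can be confirmed with the same script (output format varies
+between DPDK versions)::
+
+	./usertools/dpdk-devbind.py --status
+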
+To verify real traffic, the ``l2fwd-crypto`` example application can be used
+with the following command:
+
+.. code-block:: console
+
+	sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp" -- -p 0x1
+	--chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo AES_CBC
+	--cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+	--iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
+	--auth_op GENERATE --auth_algo SHA1_HMAC
+	--auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+	:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+	:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+
+Limitations
+-----------
+
+* Chained mbufs are not supported
+* MD5_HMAC is supported only if ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y`` is enabled in the configuration
diff --git a/doc/guides/cryptodevs/features/ccp.ini b/doc/guides/cryptodevs/features/ccp.ini
new file mode 100644
index 0000000..add4bd8
--- /dev/null
+++ b/doc/guides/cryptodevs/features/ccp.ini
@@ -0,0 +1,57 @@
+;
+; Supported features of the 'ccp' crypto poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto       = Y
+Sym operation chaining = Y
+HW Accelerated         = Y
+
+;
+; Supported crypto algorithms of the 'ccp' crypto driver.
+;
+[Cipher]
+AES CBC (128)  = Y
+AES CBC (192)  = Y
+AES CBC (256)  = Y
+AES ECB (128)  = Y
+AES ECB (192)  = Y
+AES ECB (256)  = Y
+AES CTR (128)  = Y
+AES CTR (192)  = Y
+AES CTR (256)  = Y
+3DES CBC       = Y
+
+;
+; Supported authentication algorithms of the 'ccp' crypto driver.
+;
+[Auth]
+MD5 HMAC       = Y
+SHA1           = Y
+SHA1 HMAC      = Y
+SHA224         = Y
+SHA224 HMAC    = Y
+SHA256         = Y
+SHA256 HMAC    = Y
+SHA384         = Y
+SHA384 HMAC    = Y
+SHA512         = Y
+SHA512 HMAC    = Y
+AES CMAC       = Y
+SHA3_224       = Y
+SHA3_224 HMAC  = Y
+SHA3_256       = Y
+SHA3_256 HMAC  = Y
+SHA3_384       = Y
+SHA3_384 HMAC  = Y
+SHA3_512       = Y
+SHA3_512 HMAC  = Y
+
+;
+; Supported AEAD algorithms of the 'ccp' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 18d66cb..54c90c1 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -27,6 +27,9 @@ NULL           =
 AES CBC (128)  =
 AES CBC (192)  =
 AES CBC (256)  =
+AES ECB (128)  =
+AES ECB (192)  =
+AES ECB (256)  =
 AES CTR (128)  =
 AES CTR (192)  =
 AES CTR (256)  =
@@ -61,6 +64,15 @@ AES GMAC     =
 SNOW3G UIA2  =
 KASUMI F9    =
 ZUC EIA3     =
+AES CMAC     =
+SHA3_224       =
+SHA3_224 HMAC  =
+SHA3_256       =
+SHA3_256 HMAC  =
+SHA3_384       =
+SHA3_384 HMAC  =
+SHA3_512       =
+SHA3_512 HMAC  =
 
 ;
 ; Supported AEAD algorithms of a default crypto driver.
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index 6d4e15b..3202b48 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -39,6 +39,7 @@ Crypto Device Drivers
     aesni_mb
     aesni_gcm
     armv8
+    ccp
     dpaa2_sec
     dpaa_sec
     kasumi
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* Re: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (17 preceding siblings ...)
  2018-01-10  9:42     ` [PATCH v3 19/19] doc: add document " Ravi Kumar
@ 2018-01-10  9:53     ` De Lara Guarch, Pablo
  2018-01-10 10:29       ` Kumar, Ravi1
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
  19 siblings, 1 reply; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-10  9:53 UTC (permalink / raw)
  To: Ravi Kumar, dev

Hi Ravi,

> -----Original Message-----
> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> Sent: Wednesday, January 10, 2018 9:43 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> Subject: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
> 
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
> ---

...

> --- /dev/null
> +++ b/drivers/crypto/ccp/Makefile
> @@ -0,0 +1,55 @@
> +#
> +#   Copyright(c) 2018 Advanced Micro Devices, Inc.
> +#   All rights reserved.
> +#

As Hemant commented, you need to change this full license with SPDX tags:

http://dpdk.org/ml/archives/dev/2018-January/085510.html

Could you submit a v4 with these changes today?

Thanks,
Pablo

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-01-10  9:53     ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD De Lara Guarch, Pablo
@ 2018-01-10 10:29       ` Kumar, Ravi1
  2018-01-10 13:41         ` De Lara Guarch, Pablo
  0 siblings, 1 reply; 131+ messages in thread
From: Kumar, Ravi1 @ 2018-01-10 10:29 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, dev; +Cc: Shippen, Greg

>Hi Ravi,
>
>> -----Original Message-----
>> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
>> Sent: Wednesday, January 10, 2018 9:43 AM
>> To: dev@dpdk.org
>> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
>> Subject: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
>> 
>> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
>> ---
>
>...
>
>> --- /dev/null
>> +++ b/drivers/crypto/ccp/Makefile
>> @@ -0,0 +1,55 @@
>> +#
>> +#   Copyright(c) 2018 Advanced Micro Devices, Inc.
>> +#   All rights reserved.
>> +#
>
>As Hemant commented, you need to change this full license with SPDX tags:
>
>http://dpdk.org/ml/archives/dev/2018-January/085510.html
>
>Could you submit a v4 with these changes today?
>
>Thanks,
>Pablo

Hi Pablo,

Our legal team is still working on the license. We want to get the code reviewed in parallel. 
I will give you an update later if we can upload the v4 patch with SPDX tags today.

Regards,
Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-01-10 10:29       ` Kumar, Ravi1
@ 2018-01-10 13:41         ` De Lara Guarch, Pablo
  2018-01-11  6:36           ` Kumar, Ravi1
  0 siblings, 1 reply; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-10 13:41 UTC (permalink / raw)
  To: Kumar, Ravi1, dev; +Cc: Shippen, Greg



> -----Original Message-----
> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
> Sent: Wednesday, January 10, 2018 10:30 AM
> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> dev@dpdk.org
> Cc: Shippen, Greg <Greg.Shippen@amd.com>
> Subject: RE: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
> 
> >Hi Ravi,
> >
> >> -----Original Message-----
> >> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> >> Sent: Wednesday, January 10, 2018 9:43 AM
> >> To: dev@dpdk.org
> >> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> >> Subject: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
> >>
> >> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
> >> ---
> >
> >...
> >
> >> --- /dev/null
> >> +++ b/drivers/crypto/ccp/Makefile
> >> @@ -0,0 +1,55 @@
> >> +#
> >> +#   Copyright(c) 2018 Advanced Micro Devices, Inc.
> >> +#   All rights reserved.
> >> +#
> >
> >As Hemant commented, you need to change this full license with SPDX
> tags:
> >
> >http://dpdk.org/ml/archives/dev/2018-January/085510.html
> >
> >Could you submit a v4 with these changes today?
> >
> >Thanks,
> >Pablo
> 
> Hi Pablo,
> 
> Our legal team is still working on the license. We want to get the code
> reviewed in parallel.
> I will give you an update later if we can upload the v4 patch with SPDX tags
> today.
> 

Ok, I have looked at the overall patchset and it looks ok to me.
The only thing that I would change is the title of patch 7/19:
I would change it to "crypto/ccp: support sessionless operations".

Regards,
Pablo

> Regards,
> Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-01-10 13:41         ` De Lara Guarch, Pablo
@ 2018-01-11  6:36           ` Kumar, Ravi1
  2018-01-16 15:29             ` De Lara Guarch, Pablo
  0 siblings, 1 reply; 131+ messages in thread
From: Kumar, Ravi1 @ 2018-01-11  6:36 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, dev; +Cc: Shippen, Greg

> -----Original Message-----
>> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
>> Sent: Wednesday, January 10, 2018 10:30 AM
>> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; 
>> dev@dpdk.org
>> Cc: Shippen, Greg <Greg.Shippen@amd.com>
>> Subject: RE: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
>> 
>> >Hi Ravi,
>> >
>> >> -----Original Message-----
>> >> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
>> >> Sent: Wednesday, January 10, 2018 9:43 AM
>> >> To: dev@dpdk.org
>> >> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
>> >> Subject: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
>> >>
>> >> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
>> >> ---
>> >
>> >...
>> >
>> >> --- /dev/null
>> >> +++ b/drivers/crypto/ccp/Makefile
>> >> @@ -0,0 +1,55 @@
>> >> +#
>> >> +#   Copyright(c) 2018 Advanced Micro Devices, Inc.
>> >> +#   All rights reserved.
>> >> +#
>> >
>> >As Hemant commented, you need to change this full license with SPDX
>> tags:
>> >
>> >http://dpdk.org/ml/archives/dev/2018-January/085510.html
>> >
>> >Could you submit a v4 with these changes today?
>> >
>> >Thanks,
>> >Pablo
>> 
>> Hi Pablo,
>> 
>> Our legal team is still working on the license. We want to get the 
>> code reviewed in parallel.
>> I will give you and update later if we can upload the v4 patch with 
>> SPDX tags today.
>> 
>
>Ok, I have looked at the overall patchset and it looks ok to me.
>The only thing that I would change is the title of patch 7/19:
>I would change it to "crypto/ccp: support sessionless operations".
>
>Regards,
>Pablo

Hi Pablo,

Thanks for going through our code. Good to know the code is fine. 

The new changes in licensing policy have introduced some delays as we work through the implications and get agreement from the third party.

We are working on it and will upload the v4 with SPDX license tags as soon as possible. 

Regards,
Ravi
>
>> Regards,
>> Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-01-11  6:36           ` Kumar, Ravi1
@ 2018-01-16 15:29             ` De Lara Guarch, Pablo
  2018-01-17  9:08               ` Kumar, Ravi1
  0 siblings, 1 reply; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-16 15:29 UTC (permalink / raw)
  To: Kumar, Ravi1, dev; +Cc: Shippen, Greg

Hi Ravi,

> -----Original Message-----
> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
> Sent: Thursday, January 11, 2018 6:37 AM
> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> dev@dpdk.org
> Cc: Shippen, Greg <Greg.Shippen@amd.com>
> Subject: RE: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
> 
> > -----Original Message-----
> >> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
> >> Sent: Wednesday, January 10, 2018 10:30 AM
> >> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> >> dev@dpdk.org
> >> Cc: Shippen, Greg <Greg.Shippen@amd.com>
> >> Subject: RE: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
> >>
> >> >Hi Ravi,
> >> >
> >> >> -----Original Message-----
> >> >> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> >> >> Sent: Wednesday, January 10, 2018 9:43 AM
> >> >> To: dev@dpdk.org
> >> >> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> >> >> Subject: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
> >> >>
> >> >> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
> >> >> ---
> >> >
> >> >...
> >> >
> >> >> --- /dev/null
> >> >> +++ b/drivers/crypto/ccp/Makefile
> >> >> @@ -0,0 +1,55 @@
> >> >> +#
> >> >> +#   Copyright(c) 2018 Advanced Micro Devices, Inc.
> >> >> +#   All rights reserved.
> >> >> +#
> >> >
> >> >As Hemant commented, you need to change this full license with SPDX
> >> tags:
> >> >
> >> >http://dpdk.org/ml/archives/dev/2018-January/085510.html
> >> >
> >> >Could you submit a v4 with these changes today?
> >> >
> >> >Thanks,
> >> >Pablo
> >>
> >> Hi Pablo,
> >>
> >> Our legal team is still working on the license. We want to get the
> >> code reviewed in parallel.
> >> I will give you an update later if we can upload the v4 patch with
> >> SPDX tags today.
> >>
> >
> >Ok, I have looked at the overall patchset and it looks ok to me.
> >The only thing that I would change is the title of patch 7/19:
> >I would change it to "crypto/ccp: support sessionless operations".
> >
> >Regards,
> >Pablo
> 
> Hi Pablo,
> 
> Thanks for going through our code. Good to know the code is fine.
> 
> The new changes in licensing policy have introduced some delays as we work
> through the implications and get agreement from the third party.
> 
> We are working on it and will upload the v4 with SPDX license tags as soon
> as possible.
> 

Any news? RC1 will be out on Friday, and the subtree should be closed tomorrow evening,
and there will not be more features added after this point.

Thanks,
Pablo

> Regards,
> Ravi
> >
> >> Regards,
> >> Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-01-16 15:29             ` De Lara Guarch, Pablo
@ 2018-01-17  9:08               ` Kumar, Ravi1
  2018-01-24  8:57                 ` De Lara Guarch, Pablo
  0 siblings, 1 reply; 131+ messages in thread
From: Kumar, Ravi1 @ 2018-01-17  9:08 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, dev; +Cc: Shippen, Greg

>Hi Ravi,
>
>> -----Original Message-----
>> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
>> Sent: Thursday, January 11, 2018 6:37 AM
>> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; 
>> dev@dpdk.org
>> Cc: Shippen, Greg <Greg.Shippen@amd.com>
>> Subject: RE: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
>> 
>> > -----Original Message-----
>> >> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
>> >> Sent: Wednesday, January 10, 2018 10:30 AM
>> >> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; 
>> >> dev@dpdk.org
>> >> Cc: Shippen, Greg <Greg.Shippen@amd.com>
>> >> Subject: RE: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
>> >>
>> >> >Hi Ravi,
>> >> >
>> >> >> -----Original Message-----
>> >> >> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
>> >> >> Sent: Wednesday, January 10, 2018 9:43 AM
>> >> >> To: dev@dpdk.org
>> >> >> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
>> >> >> Subject: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
>> >> >>
>> >> >> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
>> >> >> ---
>> >> >
>> >> >...
>> >> >
>> >> >> --- /dev/null
>> >> >> +++ b/drivers/crypto/ccp/Makefile
>> >> >> @@ -0,0 +1,55 @@
>> >> >> +#
>> >> >> +#   Copyright(c) 2018 Advanced Micro Devices, Inc.
>> >> >> +#   All rights reserved.
>> >> >> +#
>> >> >
>> >> >As Hemant commented, you need to change this full license with 
>> >> >SPDX
>> >> tags:
>> >> >
>> >> >http://dpdk.org/ml/archives/dev/2018-January/085510.html
>> >> >
>> >> >Could you submit a v4 with these changes today?
>> >> >
>> >> >Thanks,
>> >> >Pablo
>> >>
>> >> Hi Pablo,
>> >>
>> >> Our legal team is still working on the license. We want to get the 
>> >> code reviewed in parallel.
>> >> I will give you an update later if we can upload the v4 patch with
>> >> SPDX tags today.
>> >>
>> >
>> >Ok, I have looked at the overall patchset and it looks ok to me.
>> >The only thing that I would change is the title of patch 7/19:
>> >I would change it to "crypto/ccp: support sessionless operations".
>> >
>> >Regards,
>> >Pablo
>> 
>> Hi Pablo,
>> 
>> Thanks for going through our code. Good to know the code is fine.
>> 
>> The new changes in licensing policy have introduced some delays as we
>> work through the implications and get agreement from the third party.
>> 
>> We are working on it and will upload the v4 with SPDX license tags as 
>> soon as possible.
>> 
>
>Any news? RC1 will be out on Friday, and the subtree should be closed tomorrow evening, and there will not be more features added after this point.
>
>Thanks,
>Pablo

Hi Pablo,

We are still in the process of getting the sign off on the new SPDX license tags from the third party. We expect an answer from them by the end of the week at the earliest. 

I will get back to you as soon as I have some update.

Regards,
Ravi

>
>> Regards,
>> Ravi
>> >
>> >> Regards,
>> >> Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-01-17  9:08               ` Kumar, Ravi1
@ 2018-01-24  8:57                 ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-01-24  8:57 UTC (permalink / raw)
  To: Kumar, Ravi1, dev; +Cc: Shippen, Greg

Hi Ravi,

> -----Original Message-----
> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
> Sent: Wednesday, January 17, 2018 9:09 AM
> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> dev@dpdk.org
> Cc: Shippen, Greg <Greg.Shippen@amd.com>
> Subject: RE: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
> 
> >Hi Ravi,
> >
> >> -----Original Message-----
> >> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
> >> Sent: Thursday, January 11, 2018 6:37 AM
> >> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> >> dev@dpdk.org
> >> Cc: Shippen, Greg <Greg.Shippen@amd.com>
> >> Subject: RE: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD
> >>
> >> > -----Original Message-----
> >> >> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
> >> >> Sent: Wednesday, January 10, 2018 10:30 AM
> >> >> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> >> >> dev@dpdk.org
> >> >> Cc: Shippen, Greg <Greg.Shippen@amd.com>
> >> >> Subject: RE: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton
> PMD
> >> >>
> >> >> >Hi Ravi,
> >> >> >
> >> >> >> -----Original Message-----
> >> >> >> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> >> >> >> Sent: Wednesday, January 10, 2018 9:43 AM
> >> >> >> To: dev@dpdk.org
> >> >> >> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> >> >> >> Subject: [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton
> PMD
> >> >> >>
> >> >> >> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
> >> >> >> ---
> >> >> >
> >> >> >...
> >> >> >
> >> >> >> --- /dev/null
> >> >> >> +++ b/drivers/crypto/ccp/Makefile
> >> >> >> @@ -0,0 +1,55 @@
> >> >> >> +#
> >> >> >> +#   Copyright(c) 2018 Advanced Micro Devices, Inc.
> >> >> >> +#   All rights reserved.
> >> >> >> +#
> >> >> >
> >> >> >As Hemant commented, you need to change this full license with
> >> >> >SPDX
> >> >> tags:
> >> >> >
> >> >> >http://dpdk.org/ml/archives/dev/2018-January/085510.html
> >> >> >
> >> >> >Could you submit a v4 with these changes today?
> >> >> >
> >> >> >Thanks,
> >> >> >Pablo
> >> >>
> >> >> Hi Pablo,
> >> >>
> >> >> Our legal team is still working on the license. We want to get the
> >> >> code reviewed in parallel.
> >> >> I will give you an update later if we can upload the v4 patch
> >> >> with SPDX tags today.
> >> >>
> >> >
> >> >Ok, I have looked at the overall patchset and it looks ok to me.
> >> >The only thing that I would change is the title of patch 7/19:
> >> >I would change it to "crypto/ccp: support sessionless operations".
> >> >
> >> >Regards,
> >> >Pablo
> >>
> >> Hi Pablo,
> >>
> >> Thanks for going through our code. Good to know the code is fine.
> >>
> >> The new changes in licensing policy have introduced some delays as we
> >> work through the implications and get agreement from the third party.
> >>
> >> We are working on it and will upload the v4 with SPDX license tags as
> >> soon as possible.
> >>
> >
> >Any news? RC1 will be out on Friday, and the subtree should be closed
> tomorrow evening, and there will not be more features added after this
> point.
> >
> >Thanks,
> >Pablo
> 
> Hi Pablo,
> 
> We are still in the process of getting the sign off on the new SPDX license
> tags from the third party. We expect an answer from them by the end of
> the week at the earliest.
> 
> I will get back to you as soon as I have some update.

Any update? This could still be merged in RC2, but no later than that.

Thanks,
Pablo

> 
> Regards,
> Ravi
> 
> >
> >> Regards,
> >> Ravi
> >> >
> >> >> Regards,
> >> >> Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* [PATCH v4 01/20] crypto/ccp: add AMD ccp skeleton PMD
  2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
                       ` (18 preceding siblings ...)
  2018-01-10  9:53     ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD De Lara Guarch, Pablo
@ 2018-03-09  8:35     ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 02/20] crypto/ccp: support ccp device initialization and deinitialization Ravi Kumar
                         ` (20 more replies)
  19 siblings, 21 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 MAINTAINERS                                |  6 +++
 config/common_base                         |  5 +++
 doc/guides/rel_notes/release_18_05.rst     |  5 +++
 drivers/crypto/Makefile                    |  1 +
 drivers/crypto/ccp/Makefile                | 55 ++++++++++++++++++++++++++
 drivers/crypto/ccp/rte_ccp_pmd.c           | 62 ++++++++++++++++++++++++++++++
 drivers/crypto/ccp/rte_pmd_ccp_version.map |  4 ++
 mk/rte.app.mk                              |  2 +
 8 files changed, 140 insertions(+)
 create mode 100644 drivers/crypto/ccp/Makefile
 create mode 100644 drivers/crypto/ccp/rte_ccp_pmd.c
 create mode 100644 drivers/crypto/ccp/rte_pmd_ccp_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index a646ca3..8481731 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -640,6 +640,12 @@ M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 F: doc/guides/cryptodevs/features/default.ini
 
+AMD CCP Crypto PMD
+M: Ravi Kumar <ravi1.kumar@amd.com>
+F: drivers/crypto/ccp/
+F: doc/guides/cryptodevs/ccp.rst
+F: doc/guides/cryptodevs/features/ccp.ini
+
 ARMv8 Crypto
 M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
 F: drivers/crypto/armv8/
diff --git a/config/common_base b/config/common_base
index ad03cf4..28237f0 100644
--- a/config/common_base
+++ b/config/common_base
@@ -529,6 +529,11 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
 CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 
 #
+# Compile PMD for AMD CCP crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_CCP=n
+
+#
 # Compile PMD for Marvell Crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n
diff --git a/doc/guides/rel_notes/release_18_05.rst b/doc/guides/rel_notes/release_18_05.rst
index 3923dc2..c5b2854 100644
--- a/doc/guides/rel_notes/release_18_05.rst
+++ b/doc/guides/rel_notes/release_18_05.rst
@@ -41,6 +41,11 @@ New Features
      Also, make sure to start the actual text at the margin.
      =========================================================
 
+* **Added a new crypto poll mode driver for AMD CCP devices.**
+
+  Added the new ``ccp`` crypto driver for AMD CCP devices. See the
+  :doc:`../cryptodevs/ccp` crypto driver guide for more details on
+  this new driver.
 
 API Changes
 -----------
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 628bd14..fe41edd 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -16,5 +16,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 0000000..51c5e5b
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,55 @@
+#
+#   Copyright(c) 2018 Advanced Micro Devices, Inc.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+# 	* Redistributions of source code must retain the above copyright
+# 	notice, this list of conditions and the following disclaimer.
+# 	* Redistributions in binary form must reproduce the above copyright
+# 	notice, this list of conditions and the following disclaimer in the
+# 	documentation and/or other materials provided with the distribution.
+# 	* Neither the name of the copyright holder nor the names of its
+# 	contributors may be used to endorse or promote products derived from
+# 	this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library include paths
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 0000000..6fa14bd
--- /dev/null
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,62 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+
+uint8_t ccp_cryptodev_driver_id;
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev __rte_unused)
+{
+	return 0;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev __rte_unused)
+{
+	return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+	.probe = cryptodev_ccp_probe,
+	.remove = cryptodev_ccp_remove
+};
+
+static struct cryptodev_driver ccp_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+	"max_nb_queue_pairs=<int> max_nb_sessions=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv,
+			       ccp_cryptodev_driver_id);
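+
+/*
+ * Usage sketch (an illustration; "crypto_ccp" is the vdev name used in the
+ * CCP documentation): once the probe path is implemented, a ccp vdev can be
+ * created from the EAL command line with the parameters registered above,
+ * e.g. --vdev "crypto_ccp,max_nb_queue_pairs=2,socket_id=0".
+ */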
diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 0000000..9b9ab1a
--- /dev/null
+++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+	local: *;
+};
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 3eb41d1..95c1221 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -215,6 +215,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC)   += -lrte_bus_dpaa
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC)   += -lrte_pmd_dpaa_sec
 endif # CONFIG_RTE_LIBRTE_DPAA_BUS
 
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CCP)   += -lrte_pmd_ccp -lcrypto
+
 endif # CONFIG_RTE_LIBRTE_CRYPTODEV
 
 ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v4 02/20] crypto/ccp: support ccp device initialization and deinitialization
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 03/20] crypto/ccp: support basic pmd ops Ravi Kumar
                         ` (19 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/Makefile          |   3 +
 drivers/crypto/ccp/ccp_dev.c         | 787 +++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h         | 310 ++++++++++++++
 drivers/crypto/ccp/ccp_pci.c         | 262 ++++++++++++
 drivers/crypto/ccp/ccp_pci.h         |  53 +++
 drivers/crypto/ccp/ccp_pmd_ops.c     |  55 +++
 drivers/crypto/ccp/ccp_pmd_private.h |  82 ++++
 drivers/crypto/ccp/rte_ccp_pmd.c     | 151 ++++++-
 8 files changed, 1701 insertions(+), 2 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp_dev.c
 create mode 100644 drivers/crypto/ccp/ccp_dev.h
 create mode 100644 drivers/crypto/ccp/ccp_pci.c
 create mode 100644 drivers/crypto/ccp/ccp_pci.h
 create mode 100644 drivers/crypto/ccp/ccp_pmd_ops.c
 create mode 100644 drivers/crypto/ccp/ccp_pmd_private.h

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 51c5e5b..5e58c31 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -51,5 +51,8 @@ EXPORT_MAP := rte_pmd_ccp_version.map
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 0000000..5af2b49
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,787 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+			   uint32_t queue_size,
+			   int socket_id)
+{
+	const struct rte_memzone *mz;
+	unsigned int memzone_flags = 0;
+	const struct rte_memseg *ms;
+
+	mz = rte_memzone_lookup(queue_name);
+	if (mz != 0)
+		return mz;
+
+	ms = rte_eal_get_physmem_layout();
+	switch (ms[0].hugepage_sz) {
+	case(RTE_PGSIZE_2M):
+		memzone_flags = RTE_MEMZONE_2MB;
+		break;
+	case(RTE_PGSIZE_1G):
+		memzone_flags = RTE_MEMZONE_1GB;
+		break;
+	case(RTE_PGSIZE_16M):
+		memzone_flags = RTE_MEMZONE_16MB;
+		break;
+	case(RTE_PGSIZE_16G):
+		memzone_flags = RTE_MEMZONE_16GB;
+		break;
+	default:
+		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
+	}
+
+	return rte_memzone_reserve_aligned(queue_name,
+					   queue_size,
+					   socket_id,
+					   memzone_flags,
+					   queue_size);
+}
+
+/* bitmap support apis */
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
+}
+
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+	unsigned long first_zero;
+
+	first_zero = __builtin_ffsl(~word);
+	return first_zero ? (first_zero - 1) :
+		BITS_PER_WORD;
+}
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+	uint32_t i;
+	uint32_t nwords = 0;
+
+	nwords = (limit - 1) / BITS_PER_WORD + 1;
+	for (i = 0; i < nwords; i++) {
+		if (addr[i] == 0UL)
+			return i * BITS_PER_WORD;
+		if (addr[i] < ~(0UL))
+			break;
+	}
+	return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_WORD;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_clear >= 0) {
+		*p &= ~mask_to_clear;
+		len -= bits_to_clear;
+		bits_to_clear = BITS_PER_WORD;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p &= ~mask_to_clear;
+	}
+}
+
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+		   unsigned long nbits,
+		   unsigned long start,
+		   unsigned long invert)
+{
+	unsigned long tmp;
+
+	if (!nbits || start >= nbits)
+		return nbits;
+
+	tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+	/* Handle 1st word. */
+	tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+	start = ccp_round_down(start, BITS_PER_WORD);
+
+	while (!tmp) {
+		start += BITS_PER_WORD;
+		if (start >= nbits)
+			return nbits;
+
+		tmp = addr[start / BITS_PER_WORD] ^ invert;
+	}
+
+	return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+		  unsigned long size,
+		  unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+		       unsigned long size,
+		       unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
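+ *
+ * Example (an illustration): with bits 0, 1 and 4 set in an 8-bit map,
+ * start 0 and nr 3, the candidate run at index 2 is cut short by the set
+ * bit at 4, so the search restarts past it and returns index 5.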
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+			       unsigned long size,
+			       unsigned long start,
+			       unsigned int nr)
+{
+	unsigned long index, end, i;
+
+again:
+	index = ccp_find_next_zero_bit(map, size, start);
+
+	end = index + nr;
+	if (end > size)
+		return end;
+	i = ccp_find_next_bit(map, end, index);
+	if (i < end) {
+		start = i + 1;
+		goto again;
+	}
+	return index;
+}
+
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+	struct ccp_device *ccp;
+	int start;
+
+	/* First look at the map for the queue */
+	if (cmd_q->lsb >= 0) {
+		start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+								 LSB_SIZE, 0,
+								 count);
+		if (start < LSB_SIZE) {
+			ccp_bitmap_set(cmd_q->lsbmap, start, count);
+			return start + cmd_q->lsb * LSB_SIZE;
+		}
+	}
+
+	/* try to get an entry from the shared blocks */
+	ccp = cmd_q->dev;
+
+	rte_spinlock_lock(&ccp->lsb_lock);
+
+	start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+						    MAX_LSB_CNT * LSB_SIZE,
+						    0, count);
+	if (start <= MAX_LSB_CNT * LSB_SIZE) {
+		ccp_bitmap_set(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+		return start * LSB_ITEM_SIZE;
+	}
+	CCP_LOG_ERR("NO LSBs available");
+
+	rte_spinlock_unlock(&ccp->lsb_lock);
+
+	return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+	     unsigned int start,
+	     unsigned int count)
+{
+	int lsbno = start / LSB_SIZE;
+
+	if (!start)
+		return;
+
+	if (cmd_q->lsb == lsbno) {
+		/* An entry from the private LSB */
+		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+	} else {
+		/* From the shared LSBs */
+		struct ccp_device *ccp = cmd_q->dev;
+
+		rte_spinlock_lock(&ccp->lsb_lock);
+		ccp_bitmap_clear(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+	}
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+	int q_mask = 1 << cmd_q->id;
+	int weight = 0;
+	int j;
+
+	/* Build a bit mask to know which LSBs this queue has access to.
+	 * Don't bother with segment 0 as it has special privileges.
+	 */
+	cmd_q->lsbmask = 0;
+	status >>= LSB_REGION_WIDTH;
+	for (j = 1; j < MAX_LSB_CNT; j++) {
+		if (status & q_mask)
+			ccp_set_bit(&cmd_q->lsbmask, j);
+
+		status >>= LSB_REGION_WIDTH;
+	}
+
+	for (j = 0; j < MAX_LSB_CNT; j++)
+		if (ccp_get_bit(&cmd_q->lsbmask, j))
+			weight++;
+
+	printf("Queue %d can access %d LSB regions  of mask  %lu\n",
+	       (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+	return weight ? 0 : -EINVAL;
+}
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+			     int lsb_cnt, int n_lsbs,
+			     unsigned long *lsb_pub)
+{
+	unsigned long qlsb = 0;
+	int bitno = 0;
+	int qlsb_wgt = 0;
+	int i, j;
+
+	/* For each queue:
+	 * If the count of potential LSBs available to a queue matches the
+	 * ordinal given to us in lsb_cnt:
+	 * Copy the mask of possible LSBs for this queue into "qlsb";
+	 * For each bit in qlsb, see if the corresponding bit in the
+	 * aggregation mask is set; if so, we have a match.
+	 *     If we have a match, clear the bit in the aggregation to
+	 *     mark it as no longer available.
+	 *     If there is no match, clear the bit in qlsb and keep looking.
+	 */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+		qlsb_wgt = 0;
+		for (j = 0; j < MAX_LSB_CNT; j++)
+			if (ccp_get_bit(&cmd_q->lsbmask, j))
+				qlsb_wgt++;
+
+		if (qlsb_wgt == lsb_cnt) {
+			qlsb = cmd_q->lsbmask;
+
+			bitno = ffs(qlsb) - 1;
+			while (bitno < MAX_LSB_CNT) {
+				if (ccp_get_bit(lsb_pub, bitno)) {
+					/* We found an available LSB
+					 * that this queue can access
+					 */
+					cmd_q->lsb = bitno;
+					ccp_clear_bit(lsb_pub, bitno);
+					break;
+				}
+				ccp_clear_bit(&qlsb, bitno);
+				bitno = ffs(qlsb) - 1;
+			}
+			if (bitno >= MAX_LSB_CNT)
+				return -EINVAL;
+			n_lsbs--;
+		}
+	}
+	return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+	unsigned long lsb_pub = 0, qlsb = 0;
+	int n_lsbs = 0;
+	int bitno;
+	int i, lsb_cnt;
+	int rc = 0;
+
+	rte_spinlock_init(&ccp->lsb_lock);
+
+	/* Create an aggregate bitmap to get a total count of available LSBs */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+	for (i = 0; i < MAX_LSB_CNT; i++)
+		if (ccp_get_bit(&lsb_pub, i))
+			n_lsbs++;
+
+	if (n_lsbs >= ccp->cmd_q_count) {
+		/* We have enough LSBS to give every queue a private LSB.
+		 * Brute force search to start with the queues that are more
+		 * constrained in LSB choice. When an LSB is privately
+		 * assigned, it is removed from the public mask.
+		 * This is an ugly N squared algorithm with some optimization.
+		 */
+		for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+		     lsb_cnt++) {
+			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+							  &lsb_pub);
+			if (rc < 0)
+				return -EINVAL;
+			n_lsbs = rc;
+		}
+	}
+
+	rc = 0;
+	/* What's left of the LSBs, according to the public mask, now become
+	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
+	 * that can't be used as a shared resource, so mark the LSB slots for
+	 * them as "in use".
+	 */
+	qlsb = lsb_pub;
+	bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	while (bitno < MAX_LSB_CNT) {
+		ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+		ccp_set_bit(&qlsb, bitno);
+		bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	}
+
+	return rc;
+}
+
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+	int i;
+	uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+	uint64_t status;
+	struct ccp_queue *cmd_q;
+	const struct rte_memzone *q_mz;
+	void *vaddr;
+
+	if (dev == NULL)
+		return -1;
+
+	dev->id = ccp_dev_id++;
+	dev->qidx = 0;
+	vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+	if (type == CCP_VERSION_5B) {
+		CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+		CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+		for (i = 0; i < 12; i++) {
+			CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+				      CCP_READ_REG(vaddr, TRNG_OUT_REG));
+		}
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+		CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+	}
+	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+	/* Copy the private LSB mask to the public registers */
+	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+	status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
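+	/* Each of the 8 LSB regions has a 5-bit access field: the low
+	 * register carries regions 0-5 (30 bits, mask 0x3FFFFFFF), the
+	 * high register regions 6-7, hence the shift by 30 below.
+	 */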
+	status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo);
+
+	dev->cmd_q_count = 0;
+	/* Find available queues */
+	qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+	for (i = 0; i < MAX_HW_QUEUES; i++) {
+		if (!(qmr & (1 << i)))
+			continue;
+		cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+		cmd_q->dev = dev;
+		cmd_q->id = i;
+		cmd_q->qidx = 0;
+		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+		cmd_q->reg_base = (uint8_t *)vaddr +
+			CMD_Q_STATUS_INCR * (i + 1);
+
+		/* CCP queue memory */
+		snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+			 "%s_%d_%s_%d_%s",
+			 "ccp_dev",
+			 (int)dev->id, "queue",
+			 (int)cmd_q->id, "mem");
+		q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+						  cmd_q->qsize, SOCKET_ID_ANY);
+		cmd_q->qbase_addr = (void *)q_mz->addr;
+		cmd_q->qbase_desc = (void *)q_mz->addr;
+		cmd_q->qbase_phys_addr = q_mz->phys_addr;
+
+		cmd_q->qcontrol = 0;
+		/* init control reg to zero */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* Disable the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+		/* Clear the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+			      ALL_INTERRUPTS);
+
+		/* Configure size of each virtual queue accessible to host */
+		cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+		dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+
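+		/* the upper 16 bits of the queue base address live in
+		 * the high half of the control register
+		 */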
+		dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+		cmd_q->qcontrol |= (dma_addr_hi << 16);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* create LSB Mask map */
+		if (ccp_find_lsb_regions(cmd_q, status))
+			CCP_LOG_ERR("queue doesn't have lsb regions");
+		cmd_q->lsb = -1;
+
+		rte_atomic64_init(&cmd_q->free_slots);
+		rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+		/* one slot is left unused as a barrier between head and tail */
+	}
+
+	if (ccp_assign_lsbs(dev))
+		CCP_LOG_ERR("Unable to assign lsb region");
+
+	/* pre-allocate LSB slots */
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->cmd_q[i].sb_key =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_iv =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_sha =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+		dev->cmd_q[i].sb_hmac =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+	}
+
+	TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+	return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+	if (dev == NULL)
+		return;
+
+	TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+	      const struct rte_pci_id *ccp_id,
+	      int *type)
+{
+	char filename[PATH_MAX];
+	const struct rte_pci_id *id;
+	uint16_t vendor, device_id;
+	int i;
+	unsigned long tmp;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	vendor = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	device_id = (uint16_t)tmp;
+
+	for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+		if (vendor == id->vendor_id &&
+		    device_id == id->device_id) {
+			*type = i;
+			return 1; /* Matched device */
+		}
+	}
+	return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+		 uint8_t bus, uint8_t devid,
+		 uint8_t function, int ccp_type)
+{
+	struct ccp_device *ccp_dev = NULL;
+	struct rte_pci_device *pci;
+	char filename[PATH_MAX];
+	unsigned long tmp;
+	int uio_fd = -1, i, uio_num;
+	char uio_devname[PATH_MAX];
+	void *map_addr;
+
+	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+			      RTE_CACHE_LINE_SIZE);
+	if (ccp_dev == NULL)
+		goto fail;
+	pci = &(ccp_dev->pci);
+
+	pci->addr.domain = domain;
+	pci->addr.bus = bus;
+	pci->addr.devid = devid;
+	pci->addr.function = function;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.vendor_id = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.device_id = (uint16_t)tmp;
+
+	/* get subsystem_vendor id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+	/* get subsystem_device id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_device",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_device_id = (uint16_t)tmp;
+
+	/* get class_id */
+	snprintf(filename, sizeof(filename), "%s/class",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	/* only the least significant 24 bits are valid: class, subclass, prog if */
+	pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+	/* parse resources */
+	snprintf(filename, sizeof(filename), "%s/resource", dirname);
+	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+		goto fail;
+
+	uio_num = ccp_find_uio_devname(dirname);
+	if (uio_num < 0) {
+		/*
+		 * It may take time for the uio device to appear;
+		 * wait here and try again.
+		 */
+		usleep(100000);
+		uio_num = ccp_find_uio_devname(dirname);
+		if (uio_num < 0)
+			goto fail;
+	}
+	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+	if (uio_fd < 0)
+		goto fail;
+	if (flock(uio_fd, LOCK_EX | LOCK_NB))
+		goto fail;
+
+	/* Map the PCI memory resource of device */
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+		char devname[PATH_MAX];
+		int res_fd;
+
+		if (pci->mem_resource[i].phys_addr == 0)
+			continue;
+		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+		res_fd = open(devname, O_RDWR);
+		if (res_fd < 0)
+			goto fail;
+		map_addr = mmap(NULL, pci->mem_resource[i].len,
+				PROT_READ | PROT_WRITE,
+				MAP_SHARED, res_fd, 0);
+		if (map_addr == MAP_FAILED)
+			goto fail;
+
+		pci->mem_resource[i].addr = map_addr;
+	}
+
+	/* device is valid, add in list */
+	if (ccp_add_device(ccp_dev, ccp_type)) {
+		ccp_remove_device(ccp_dev);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	CCP_LOG_ERR("CCP Device probe failed");
+	if (uio_fd >= 0)
+		close(uio_fd);
+	if (ccp_dev)
+		rte_free(ccp_dev);
+	return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+	int dev_cnt = 0;
+	int ccp_type = 0;
+	struct dirent *d;
+	DIR *dir;
+	int ret = 0;
+	int module_idx = 0;
+	uint16_t domain;
+	uint8_t bus, devid, function;
+	char dirname[PATH_MAX];
+
+	module_idx = ccp_check_pci_uio_module();
+	if (module_idx < 0)
+		return -1;
+
+	TAILQ_INIT(&ccp_list);
+	dir = opendir(SYSFS_PCI_DEVICES);
+	if (dir == NULL)
+		return -1;
+	while ((d = readdir(dir)) != NULL) {
+		if (d->d_name[0] == '.')
+			continue;
+		if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+					&domain, &bus, &devid, &function) != 0)
+			continue;
+		snprintf(dirname, sizeof(dirname), "%s/%s",
+			     SYSFS_PCI_DEVICES, d->d_name);
+		if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+			printf("CCP : Detected CCP device with ID = 0x%x\n",
+			       ccp_id[ccp_type].device_id);
+			ret = ccp_probe_device(dirname, domain, bus, devid,
+					       function, ccp_type);
+			if (ret == 0)
+				dev_cnt++;
+		}
+	}
+	closedir(dir);
+	return dev_cnt;
+}
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 0000000..fe05bf0
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,310 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/**< CCP specific */
+#define MAX_HW_QUEUES                   5
+
+/**< CCP Register Mappings */
+#define Q_MASK_REG                      0x000
+#define TRNG_OUT_REG                    0x00c
+
+/* CCP Version 5 Specifics */
+#define CMD_QUEUE_MASK_OFFSET		0x00
+#define	CMD_QUEUE_PRIO_OFFSET		0x04
+#define CMD_REQID_CONFIG_OFFSET		0x08
+#define	CMD_CMD_TIMEOUT_OFFSET		0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET	0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET	0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET	0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET	0x24
+
+#define CMD_Q_CONTROL_BASE		0x0000
+#define CMD_Q_TAIL_LO_BASE		0x0004
+#define CMD_Q_HEAD_LO_BASE		0x0008
+#define CMD_Q_INT_ENABLE_BASE		0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE	0x0010
+
+#define CMD_Q_STATUS_BASE		0x0100
+#define CMD_Q_INT_STATUS_BASE		0x0104
+
+#define	CMD_CONFIG_0_OFFSET		0x6000
+#define	CMD_TRNG_CTL_OFFSET		0x6008
+#define	CMD_AES_MASK_OFFSET		0x6010
+#define	CMD_CLK_GATE_CTL_OFFSET		0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR		0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN			0x1
+#define CMD_Q_SIZE			0x1F
+#define CMD_Q_SHIFT			3
+#define COMMANDS_PER_QUEUE		2048
+
+#define QUEUE_SIZE_VAL                  ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+					 CMD_Q_SIZE)
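+/* e.g. COMMANDS_PER_QUEUE = 2048 = 2^11, so ffs() returns 12 and
+ * QUEUE_SIZE_VAL = 10, i.e. log2(COMMANDS_PER_QUEUE) - 1
+ */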
+#define Q_DESC_SIZE                     sizeof(struct ccp_desc)
+#define Q_SIZE(n)                       (COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION                  0x1
+#define INT_ERROR                       0x2
+#define INT_QUEUE_STOPPED               0x4
+#define ALL_INTERRUPTS                  (INT_COMPLETION| \
+					 INT_ERROR| \
+					 INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH                5
+#define MAX_LSB_CNT                     8
+
+#define LSB_SIZE                        16
+#define LSB_ITEM_SIZE                   32
+#define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
+
+/* bitmap */
+enum {
+	BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b)  ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+	CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+	(~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+	(~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
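+/* CCP_BITMAP_FIRST_WORD_MASK keeps the bits from "start" upward within
+ * its word; CCP_BITMAP_LAST_WORD_MASK keeps the low nbits % BITS_PER_WORD
+ * bits (all of them when nbits is a word multiple, courtesy of the
+ * -(nbits) & (BITS_PER_WORD - 1) trick).
+ */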
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+				     uint32_t value)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+	ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+	ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+	CCP_VERSION_5A = 0,
+	CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+	struct ccp_device *dev;
+	char memz_name[RTE_MEMZONE_NAMESIZE];
+
+	rte_atomic64_t free_slots;
+	/**< available free slots updated from enq/deq calls */
+
+	/* Queue identifier */
+	uint64_t id;	/**< queue id */
+	uint64_t qidx;	/**< queue index */
+	uint64_t qsize;	/**< queue size */
+
+	/* Queue address */
+	struct ccp_desc *qbase_desc;
+	void *qbase_addr;
+	phys_addr_t qbase_phys_addr;
+	/**< queue-page registers addr */
+	void *reg_base;
+
+	uint32_t qcontrol;
+	/**< queue ctrl reg */
+
+	int lsb;
+	/**< lsb region assigned to queue */
+	unsigned long lsbmask;
+	/**< lsb regions queue can access */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+	/**< all lsb resources which queue is using */
+	uint32_t sb_key;
+	/**< lsb assigned for queue */
+	uint32_t sb_iv;
+	/**< lsb assigned for iv */
+	uint32_t sb_sha;
+	/**< lsb assigned for sha ctx */
+	uint32_t sb_hmac;
+	/**< lsb assigned for hmac ctx */
+} __rte_cache_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+	TAILQ_ENTRY(ccp_device) next;
+	int id;
+	/**< ccp dev id on platform */
+	struct ccp_queue cmd_q[MAX_HW_QUEUES];
+	/**< ccp queue */
+	int cmd_q_count;
+	/**< no. of ccp Queues */
+	struct rte_pci_device pci;
+	/**< ccp pci identifier */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+	/**< shared lsb mask of ccp */
+	rte_spinlock_t lsb_lock;
+	/**< protection for shared lsb region allocation */
+	int qidx;
+	/**< current queue index */
+} __rte_cache_aligned;
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+	uint32_t soc:1;
+	uint32_t ioc:1;
+	uint32_t rsvd1:1;
+	uint32_t init:1;
+	uint32_t eom:1;
+	uint32_t function:15;
+	uint32_t engine:4;
+	uint32_t prot:1;
+	uint32_t rsvd2:7;
+};
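+/* dword0 field widths sum to exactly 32 bits: 1+1+1+1+1+15+4+1+7 */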
+
+struct dword3 {
+	uint32_t src_hi:16;
+	uint32_t src_mem:2;
+	uint32_t lsb_cxt_id:8;
+	uint32_t rsvd1:5;
+	uint32_t fixed:1;
+};
+
+union dword4 {
+	uint32_t dst_lo;	/* NON-SHA */
+	uint32_t sha_len_lo;	/* SHA */
+};
+
+union dword5 {
+	struct {
+		uint32_t dst_hi:16;
+		uint32_t dst_mem:2;
+		uint32_t rsvd1:13;
+		uint32_t fixed:1;
+	} fields;
+	uint32_t sha_len_hi;
+};
+
+struct dword7 {
+	uint32_t key_hi:16;
+	uint32_t key_mem:2;
+	uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+	struct dword0 dw0;
+	uint32_t length;
+	uint32_t src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	uint32_t key_lo;
+	struct dword7 dw7;
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+	return ((uint64_t)addr) & 0xffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+	/* descriptor hi pointer fields are only 16 bits wide */
+	return ((uint64_t)addr >> 32) & 0xffff;
+}
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 0000000..ddf4b49
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,262 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+	"igb_uio",
+	"uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+	FILE *fp;
+	int i;
+	char buf[BUFSIZ];
+
+	fp = fopen(PROC_MODULES, "r");
+	if (fp == NULL)
+		return -1;
+	i = 0;
+	while (uio_module_names[i] != NULL) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+				     strlen(uio_module_names[i]))) {
+				fclose(fp);
+				return i;
+			}
+		}
+		i++;
+		rewind(fp);
+	}
+	fclose(fp);
+	printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
+	return -1; /* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
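+ * e.g. "0000:03:00.2" yields domain 0x0000, bus 0x03, devid 0x00 and
+ * function 2.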
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			  uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+	/* first split on ':' */
+	union splitaddr {
+		struct {
+			char *domain;
+			char *bus;
+			char *devid;
+			char *function;
+		};
+		char *str[PCI_FMT_NVAL];
+		/* last element-separator is "." not ":" */
+	} splitaddr;
+
+	char *buf_copy = strndup(buf, bufsize);
+
+	if (buf_copy == NULL)
+		return -1;
+
+	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+			!= PCI_FMT_NVAL - 1)
+		goto error;
+	/* final split is on '.' between devid and function */
+	splitaddr.function = strchr(splitaddr.devid, '.');
+	if (splitaddr.function == NULL)
+		goto error;
+	*splitaddr.function++ = '\0';
+
+	/* now convert to int values */
+	errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+	*bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+	*devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+	*function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+	if (errno != 0)
+		goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+	return 0;
+error:
+	free(buf_copy);
+	return -1;
+}
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+	FILE *f;
+	char buf[BUFSIZ];
+	char *end = NULL;
+
+	f = fopen(filename, "r");
+	if (f == NULL)
+		return -1;
+	if (fgets(buf, sizeof(buf), f) == NULL) {
+		fclose(f);
+		return -1;
+	}
+	*val = strtoul(buf, &end, 0);
+	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+		fclose(f);
+		return -1;
+	}
+	fclose(f);
+	return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO         0x00000100
+#define IORESOURCE_MEM        0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+				 uint64_t *end_addr, uint64_t *flags)
+{
+	union pci_resource_info {
+		struct {
+			char *phys_addr;
+			char *end_addr;
+			char *flags;
+		};
+		char *ptrs[PCI_RESOURCE_FMT_NVAL];
+	} res_info;
+
+	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+		return -1;
+	errno = 0;
+	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+	*end_addr = strtoull(res_info.end_addr, NULL, 16);
+	*flags = strtoull(res_info.flags, NULL, 16);
+	if (errno != 0)
+		return -1;
+
+	return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+	FILE *fp;
+	char buf[BUFSIZ];
+	int i;
+	uint64_t phys_addr, end_addr, flags;
+
+	fp = fopen(filename, "r");
+	if (fp == NULL)
+		return -1;
+
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+		if (fgets(buf, sizeof(buf), fp) == NULL)
+			goto error;
+		if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+				&phys_addr, &end_addr, &flags) < 0)
+			goto error;
+
+		if (flags & IORESOURCE_MEM) {
+			dev->mem_resource[i].phys_addr = phys_addr;
+			dev->mem_resource[i].len = end_addr - phys_addr + 1;
+			/* not mapped for now */
+			dev->mem_resource[i].addr = NULL;
+		}
+	}
+	fclose(fp);
+	return 0;
+
+error:
+	fclose(fp);
+	return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+	DIR *dir;
+	struct dirent *e;
+	char dirname_uio[PATH_MAX];
+	unsigned int uio_num;
+	int ret = -1;
+
+	/* depending on kernel version, uio can be located in uio/uioX
+	 * or uio:uioX
+	 */
+	snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+	dir = opendir(dirname_uio);
+	if (dir == NULL) {
+		/* retry with the parent directory; the uio node location
+		 * differs across kernel versions
+		 */
+		dir = opendir(dirname);
+		if (dir == NULL)
+			return -1;
+	}
+
+	/* take the first file starting with "uio" */
+	while ((e = readdir(dir)) != NULL) {
+		/* format could be uio%d ...*/
+		int shortprefix_len = sizeof("uio") - 1;
+		/* ... or uio:uio%d */
+		int longprefix_len = sizeof("uio:uio") - 1;
+		char *endptr;
+
+		if (strncmp(e->d_name, "uio", 3) != 0)
+			continue;
+
+		/* first try uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+
+		/* then try uio:uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+	}
+	closedir(dir);
+	return ret;
+}
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 0000000..a4c09c8
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,53 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			      uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+				 struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+#endif /* _CCP_PCI_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 0000000..bc4120b
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,55 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+struct rte_cryptodev_ops ccp_ops = {
+		.dev_configure		= NULL,
+		.dev_start		= NULL,
+		.dev_stop		= NULL,
+		.dev_close		= NULL,
+
+		.stats_get		= NULL,
+		.stats_reset		= NULL,
+
+		.dev_infos_get		= NULL,
+
+		.queue_pair_setup	= NULL,
+		.queue_pair_release	= NULL,
+		.queue_pair_start	= NULL,
+		.queue_pair_stop	= NULL,
+		.queue_pair_count	= NULL,
+
+		.session_get_size	= NULL,
+		.session_configure	= NULL,
+		.session_clear		= NULL,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 0000000..f5b6061
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,82 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev.h>
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/**< Maximum queue pairs supported by CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS	1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
+	unsigned int max_nb_sessions;	/**< Max number of sessions */
+	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+};
+
+/**< device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 6fa14bd..cc35a97 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -28,23 +28,170 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <rte_bus_pci.h>
 #include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
 
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global flag tracking whether the CCP PMD has already been initialized.
+ */
+static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
+		      struct rte_crypto_op **ops __rte_unused,
+		      uint16_t nb_ops __rte_unused)
+{
+	uint16_t enq_cnt = 0;
+
+	return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
+		      struct rte_crypto_op **ops __rte_unused,
+		      uint16_t nb_ops __rte_unused)
+{
+	uint16_t nb_dequeued = 0;
+
+	return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+	},
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+	},
+	{.device_id = 0},
+};
+
 /** Remove ccp pmd */
 static int
-cryptodev_ccp_remove(struct rte_vdev_device *dev __rte_unused)
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
 {
+	const char *name;
+
+	ccp_pmd_init_done = 0;
+	name = rte_vdev_device_name(dev);
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+			name, rte_socket_id());
+
 	return 0;
 }
 
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+		     struct rte_vdev_device *vdev,
+		     struct rte_cryptodev_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct ccp_private *internals;
+	int cryptodev_cnt = 0;
+
+	if (init_params->name[0] == '\0')
+		snprintf(init_params->name, sizeof(init_params->name),
+				"%s", name);
+
+	dev = rte_cryptodev_pmd_create(init_params->name,
+				       &vdev->device,
+				       init_params);
+	if (dev == NULL) {
+		CCP_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+	if (cryptodev_cnt <= 0) {
+		CCP_LOG_ERR("failed to detect CCP crypto device");
+		goto init_error;
+	}
+
+	printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+	dev->driver_id = ccp_cryptodev_driver_id;
+
+	/* register rx/tx burst functions for data path */
+	dev->dev_ops = ccp_pmd_ops;
+	dev->enqueue_burst = ccp_pmd_enqueue_burst;
+	dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+	internals = dev->data->dev_private;
+
+	internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+	internals->crypto_num_dev = cryptodev_cnt;
+
+	return 0;
+
+init_error:
+	CCP_LOG_ERR("driver %s: %s() failed",
+		    init_params->name, __func__);
+	cryptodev_ccp_remove(vdev);
+
+	return -EFAULT;
+}
+
 /** Probe ccp pmd */
 static int
-cryptodev_ccp_probe(struct rte_vdev_device *vdev __rte_unused)
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
 {
+	int rc = 0;
+	const char *name;
+	struct rte_cryptodev_pmd_init_params init_params = {
+		"",
+		sizeof(struct ccp_private),
+		rte_socket_id(),
+		CCP_PMD_MAX_QUEUE_PAIRS,
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+	};
+	const char *input_args;
+
+	if (ccp_pmd_init_done) {
+		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+		return -EFAULT;
+	}
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	input_args = rte_vdev_device_args(vdev);
+	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+	init_params.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	rc = cryptodev_ccp_create(name, vdev, &init_params);
+	if (rc)
+		return rc;
+	ccp_pmd_init_done = 1;
 	return 0;
 }
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v4 03/20] crypto/ccp: support basic pmd ops
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 02/20] crypto/ccp: support ccp device initialization and deintialization Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 04/20] crypto/ccp: support session related crypto " Ravi Kumar
                         ` (18 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_dev.c         |  9 ++++++
 drivers/crypto/ccp/ccp_dev.h         |  9 ++++++
 drivers/crypto/ccp/ccp_pmd_ops.c     | 61 +++++++++++++++++++++++++++++++++---
 drivers/crypto/ccp/ccp_pmd_private.h | 43 +++++++++++++++++++++++++
 4 files changed, 117 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 5af2b49..57bccf4 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -52,6 +52,15 @@
 struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
 static int ccp_dev_id;
 
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+	struct ccp_private *priv = dev->data->dev_private;
+
+	priv->last_dev = TAILQ_FIRST(&ccp_list);
+	return 0;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index fe05bf0..b321530 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -104,6 +104,10 @@
 #define LSB_ITEM_SIZE                   32
 #define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
 
+/* General CCP Defines */
+
+#define CCP_SB_BYTES                    32
+
 /* bitmap */
 enum {
 	BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
@@ -299,6 +303,11 @@ high32_value(unsigned long addr)
 	return ((uint64_t)addr >> 32) & 0x00000ffff;
 }
 
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
 /**
  * Detect ccp platform and initialize all ccp devices
  *
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index bc4120b..b6f8c48 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -28,18 +28,69 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <string.h>
+
+#include <rte_common.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+
+static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+	       struct rte_cryptodev_config *config __rte_unused)
+{
+	return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+	return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+	return 0;
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+		 struct rte_cryptodev_info *dev_info)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = ccp_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
 
 struct rte_cryptodev_ops ccp_ops = {
-		.dev_configure		= NULL,
-		.dev_start		= NULL,
-		.dev_stop		= NULL,
-		.dev_close		= NULL,
+		.dev_configure		= ccp_pmd_config,
+		.dev_start		= ccp_pmd_start,
+		.dev_stop		= ccp_pmd_stop,
+		.dev_close		= ccp_pmd_close,
 
 		.stats_get		= NULL,
 		.stats_reset		= NULL,
 
-		.dev_infos_get		= NULL,
+		.dev_infos_get		= ccp_pmd_info_get,
 
 		.queue_pair_setup	= NULL,
 		.queue_pair_release	= NULL,
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
index f5b6061..d278a8c 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -60,13 +60,56 @@
 #define CCP_NB_MAX_DESCRIPTORS 1024
 #define CCP_MAX_BURST 64
 
+#include "ccp_dev.h"
+
 /* private data structure for each CCP crypto device */
 struct ccp_private {
 	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
 	unsigned int max_nb_sessions;	/**< Max number of sessions */
 	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+	struct ccp_device *last_dev;	/**< Last working crypto device */
 };
 
+/* CCP batch info */
+struct ccp_batch_info {
+	struct rte_crypto_op *op[CCP_MAX_BURST];
+	/**< optable populated at enque time from app*/
+	int op_idx;
+	struct ccp_queue *cmd_q;
+	uint16_t opcnt;
+	/**< no. of crypto ops in batch*/
+	int desccnt;
+	/**< no. of ccp queue descriptors*/
+	uint32_t head_offset;
+	/**< ccp queue head tail offsets time of enqueue*/
+	uint32_t tail_offset;
+	uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+	phys_addr_t lsb_buf_phys;
+	/**< LSB intermediate buf for passthru */
+	int lsb_buf_idx;
+} __rte_cache_aligned;
+
+/**< CCP crypto queue pair */
+struct ccp_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	/**< Unique Queue Pair Name */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing process packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_mempool *batch_mp;
+	/**< Session Mempool for batch info */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+	struct ccp_batch_info *b_info;
+	/**< Store ops pulled out of queue */
+	struct rte_cryptodev *dev;
+	/**< rte crypto device to which this qp belongs */
+} __rte_cache_aligned;
+
+
 /**< device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *ccp_pmd_ops;
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v4 04/20] crypto/ccp: support session related crypto pmd ops
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 02/20] crypto/ccp: support ccp device initialization and deintialization Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 03/20] crypto/ccp: support basic pmd ops Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 05/20] crypto/ccp: support queue pair related " Ravi Kumar
                         ` (17 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/Makefile      |   3 +-
 drivers/crypto/ccp/ccp_crypto.c  | 229 +++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  | 267 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h     | 129 +++++++++++++++++++
 drivers/crypto/ccp/ccp_pmd_ops.c |  61 ++++++++-
 5 files changed, 685 insertions(+), 4 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp_crypto.c
 create mode 100644 drivers/crypto/ccp/ccp_crypto.h

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 5e58c31..5241465 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -51,8 +51,9 @@ EXPORT_MAP := rte_pmd_ccp_version.map
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 0000000..c365c0f
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,229 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
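+/* Map an xform chain onto a CCP command order: a lone CIPHER or AUTH
+ * xform maps directly, CIPHER followed by AUTH becomes CIPHER_HASH,
+ * AUTH followed by CIPHER becomes HASH_CIPHER, and an AEAD xform is
+ * COMBINED.
+ */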
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+	if (xform == NULL)
+		return res;
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next == NULL)
+			return CCP_CMD_AUTH;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return CCP_CMD_HASH_CIPHER;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next == NULL)
+			return CCP_CMD_CIPHER;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return CCP_CMD_CIPHER_HASH;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+		return CCP_CMD_COMBINED;
+	return res;
+}
+
+/* configure session */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+			     const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+	cipher_xform = &xform->cipher;
+
+	/* set cipher direction */
+	if (cipher_xform->op ==  RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+	else
+		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+	/* set cipher key */
+	sess->cipher.key_length = cipher_xform->key.length;
+	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+		   cipher_xform->key.length);
+
+	/* set iv parameters */
+	sess->iv.offset = cipher_xform->iv.offset;
+	sess->iv.length = cipher_xform->iv.length;
+
+	switch (cipher_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo");
+		return -1;
+	}
+
+	switch (sess->cipher.engine) {
+	default:
+		CCP_LOG_ERR("Invalid CCP Engine");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_auth_xform *auth_xform = NULL;
+
+	auth_xform = &xform->auth;
+
+	sess->auth.digest_length = auth_xform->digest_length;
+	if (auth_xform->op ==  RTE_CRYPTO_AUTH_OP_GENERATE)
+		sess->auth.op = CCP_AUTH_OP_GENERATE;
+	else
+		sess->auth.op = CCP_AUTH_OP_VERIFY;
+	switch (auth_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported hash algo");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_aead_xform *aead_xform = NULL;
+
+	aead_xform = &xform->aead;
+
+	sess->cipher.key_length = aead_xform->key.length;
+	rte_memcpy(sess->cipher.key, aead_xform->key.data,
+		   aead_xform->key.length);
+
+	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+		sess->auth.op = CCP_AUTH_OP_GENERATE;
+	} else {
+		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+		sess->auth.op = CCP_AUTH_OP_VERIFY;
+	}
+	sess->auth.aad_length = aead_xform->aad_length;
+	sess->auth.digest_length = aead_xform->digest_length;
+
+	/* set iv parameters */
+	sess->iv.offset = aead_xform->iv.offset;
+	sess->iv.length = aead_xform->iv.length;
+
+	switch (aead_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *aead_xform = NULL;
+	int ret = 0;
+
+	sess->cmd_id = ccp_get_cmd_id(xform);
+
+	switch (sess->cmd_id) {
+	case CCP_CMD_CIPHER:
+		cipher_xform = xform;
+		break;
+	case CCP_CMD_AUTH:
+		auth_xform = xform;
+		break;
+	case CCP_CMD_CIPHER_HASH:
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case CCP_CMD_HASH_CIPHER:
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	case CCP_CMD_COMBINED:
+		aead_xform = xform;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+		return -1;
+	}
+
+	/* Default IV length = 0 */
+	sess->iv.length = 0;
+	if (cipher_xform) {
+		ret = ccp_configure_session_cipher(sess, cipher_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+			return ret;
+		}
+	}
+	if (auth_xform) {
+		ret = ccp_configure_session_auth(sess, auth_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported auth parameters");
+			return ret;
+		}
+	}
+	if (aead_xform) {
+		ret = ccp_configure_session_aead(sess, aead_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported aead parameters");
+			return ret;
+		}
+	}
+	return ret;
+}
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 0000000..346d5ee
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,267 @@
+/*-
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	notice, this list of conditions and the following disclaimer in the
+ *	documentation and/or other materials provided with the distribution.
+ *	* Neither the name of the copyright holder nor the names of its
+ *	contributors may be used to endorse or promote products derived from
+ *	this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include "ccp_dev.h"
+
+#define CCP_SHA3_CTX_SIZE 200
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+	CCP_AES_MODE_ECB = 0,
+	CCP_AES_MODE_CBC,
+	CCP_AES_MODE_OFB,
+	CCP_AES_MODE_CFB,
+	CCP_AES_MODE_CTR,
+	CCP_AES_MODE_CMAC,
+	CCP_AES_MODE_GHASH,
+	CCP_AES_MODE_GCTR,
+	CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+	CCP_AES_MODE_GHASH_AAD = 0,
+	CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+	CCP_AES_TYPE_128 = 0,
+	CCP_AES_TYPE_192,
+	CCP_AES_TYPE_256,
+	CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+	CCP_DES_MODE_ECB = 0, /* Not supported */
+	CCP_DES_MODE_CBC,
+	CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+	CCP_DES_TYPE_128 = 0,	/* 112 + 16 parity */
+	CCP_DES_TYPE_192,	/* 168 + 24 parity */
+	CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ */
+enum ccp_sha_type {
+	CCP_SHA_TYPE_1 = 1,
+	CCP_SHA_TYPE_224,
+	CCP_SHA_TYPE_256,
+	CCP_SHA_TYPE_384,
+	CCP_SHA_TYPE_512,
+	CCP_SHA_TYPE_RSVD1,
+	CCP_SHA_TYPE_RSVD2,
+	CCP_SHA3_TYPE_224,
+	CCP_SHA3_TYPE_256,
+	CCP_SHA3_TYPE_384,
+	CCP_SHA3_TYPE_512,
+	CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+	CCP_CIPHER_ALGO_AES_CBC = 0,
+	CCP_CIPHER_ALGO_AES_ECB,
+	CCP_CIPHER_ALGO_AES_CTR,
+	CCP_CIPHER_ALGO_AES_GCM,
+	CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+	CCP_CIPHER_DIR_DECRYPT = 0,
+	CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+	CCP_AUTH_ALGO_SHA1 = 0,
+	CCP_AUTH_ALGO_SHA1_HMAC,
+	CCP_AUTH_ALGO_SHA224,
+	CCP_AUTH_ALGO_SHA224_HMAC,
+	CCP_AUTH_ALGO_SHA3_224,
+	CCP_AUTH_ALGO_SHA3_224_HMAC,
+	CCP_AUTH_ALGO_SHA256,
+	CCP_AUTH_ALGO_SHA256_HMAC,
+	CCP_AUTH_ALGO_SHA3_256,
+	CCP_AUTH_ALGO_SHA3_256_HMAC,
+	CCP_AUTH_ALGO_SHA384,
+	CCP_AUTH_ALGO_SHA384_HMAC,
+	CCP_AUTH_ALGO_SHA3_384,
+	CCP_AUTH_ALGO_SHA3_384_HMAC,
+	CCP_AUTH_ALGO_SHA512,
+	CCP_AUTH_ALGO_SHA512_HMAC,
+	CCP_AUTH_ALGO_SHA3_512,
+	CCP_AUTH_ALGO_SHA3_512_HMAC,
+	CCP_AUTH_ALGO_AES_CMAC,
+	CCP_AUTH_ALGO_AES_GCM,
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	CCP_AUTH_ALGO_MD5_HMAC,
+#endif
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+	CCP_AUTH_OP_GENERATE = 0,
+	CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+	enum ccp_cmd_order cmd_id;
+	/**< chain order mode */
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	/**< IV parameters */
+	struct {
+		enum ccp_cipher_algo  algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+			enum ccp_des_mode des_mode;
+		} um;
+		union {
+			enum ccp_aes_type aes_type;
+			enum ccp_des_type des_type;
+		} ut;
+		enum ccp_cipher_dir dir;
+		uint64_t key_length;
+		/**< max cipher key size 256 bits */
+		uint8_t key[32];
+	/**< CCP key format */
+		uint8_t key_ccp[32];
+		phys_addr_t key_phys;
+	/**< AES-CTR: nonce(4), iv(8), ctr */
+		uint8_t nonce[32];
+		phys_addr_t nonce_phys;
+	} cipher;
+	/**< Cipher Parameters */
+
+	struct {
+		enum ccp_hash_algo algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+		} um;
+		union {
+			enum ccp_sha_type sha_type;
+			enum ccp_aes_type aes_type;
+		} ut;
+		enum ccp_hash_op op;
+		uint64_t key_length;
+		/**< max hash key size 144 bytes (struct capabilities) */
+		uint8_t key[144];
+		/**< max big-endian key size of AES is 32 */
+		uint8_t key_ccp[32];
+		phys_addr_t key_phys;
+		uint64_t digest_length;
+		void *ctx;
+		int ctx_len;
+		int offset;
+		int block_size;
+		/**< Buffer to store software-generated precomputed values */
+		/**< For HMAC: H(ipad ^ key) and H(opad ^ key) */
+		/**< For CMAC: K1 IV and K2 IV */
+		uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+		/**< SHA3 initial ctx, all zeros */
+		uint8_t sha3_ctx[200];
+		int aad_length;
+	} auth;
+	/**< Authentication Parameters */
+	enum rte_crypto_aead_algorithm aead_algo;
+	/**< AEAD Algorithm */
+
+	uint32_t reserved;
+} __rte_cache_aligned;
+
+extern uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_qp;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @return 0 on success, negative value otherwise
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+			       const struct rte_crypto_sym_xform *xform);
+
+#endif /* _CCP_CRYPTO_H_ */
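
The pre_compute buffer declared above holds the half-hashed HMAC pads,
H(ipad ^ key) and H(opad ^ key). The pads themselves are built on the
host; a self-contained sketch of that step using only standard C (the
hashing of each pad is assumed to happen elsewhere):

	#include <stdint.h>
	#include <string.h>

	/* Build the HMAC inner/outer pads from which H(ipad ^ key)
	 * and H(opad ^ key) are then computed. Keys longer than one
	 * block are assumed to be pre-hashed by the caller. */
	static void
	hmac_build_pads(const uint8_t *key, size_t keylen,
			size_t block_size, uint8_t *ipad, uint8_t *opad)
	{
		size_t i;

		memset(ipad, 0x36, block_size);
		memset(opad, 0x5c, block_size);
		for (i = 0; i < keylen && i < block_size; i++) {
			ipad[i] ^= key[i];
			opad[i] ^= key[i];
		}
	}
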
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index b321530..a16ba81 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -225,6 +225,123 @@ struct ccp_device {
 	/**< current queue index */
 } __rte_cache_aligned;
 
+/**< CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+	CCP_ENGINE_AES = 0,
+	CCP_ENGINE_XTS_AES_128,
+	CCP_ENGINE_3DES,
+	CCP_ENGINE_SHA,
+	CCP_ENGINE_RSA,
+	CCP_ENGINE_PASSTHRU,
+	CCP_ENGINE_ZLIB_DECOMPRESS,
+	CCP_ENGINE_ECC,
+	CCP_ENGINE__LAST,
+};
+
+/* Passthru engine */
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+	CCP_PASSTHRU_BITWISE_NOOP = 0,
+	CCP_PASSTHRU_BITWISE_AND,
+	CCP_PASSTHRU_BITWISE_OR,
+	CCP_PASSTHRU_BITWISE_XOR,
+	CCP_PASSTHRU_BITWISE_MASK,
+	CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+	CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+	CCP_PASSTHRU_BYTESWAP_32BIT,
+	CCP_PASSTHRU_BYTESWAP_256BIT,
+	CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+	phys_addr_t src_addr;
+	phys_addr_t dest_addr;
+	enum ccp_passthru_bitwise bit_mod;
+	enum ccp_passthru_byteswap byte_swap;
+	int len;
+	int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} aes;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} des;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t rsvd:5;
+		uint16_t type:2;
+	} aes_xts;
+	struct {
+		uint16_t rsvd1:10;
+		uint16_t type:4;
+		uint16_t rsvd2:1;
+	} sha;
+	struct {
+		uint16_t mode:3;
+		uint16_t size:12;
+	} rsa;
+	struct {
+		uint16_t byteswap:2;
+		uint16_t bitwise:3;
+		uint16_t reflect:2;
+		uint16_t rsvd:8;
+	} pt;
+	struct  {
+		uint16_t rsvd:13;
+	} zlib;
+	struct {
+		uint16_t size:10;
+		uint16_t type:2;
+		uint16_t mode:3;
+	} ecc;
+	uint16_t raw;
+};
+
 /**
 * descriptor for version 5 CCP commands
  * 8 32-bit words:
@@ -291,6 +408,18 @@ struct ccp_desc {
 	struct dword7 dw7;
 };
 
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+	CCP_CMD_CIPHER = 0,
+	CCP_CMD_AUTH,
+	CCP_CMD_CIPHER_HASH,
+	CCP_CMD_HASH_CIPHER,
+	CCP_CMD_COMBINED,
+	CCP_CMD_NOT_SUPPORTED,
+};
+
 static inline uint32_t
 low32_value(unsigned long addr)
 {
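
The union ccp_function added above packs the 15-bit function field of a
descriptor's dword0. A hedged example of composing the function word for
an AES-128 CBC encrypt (the CCP_AES_* and CCP_CIPHER_DIR_* names come
from ccp_crypto.h, introduced in this same patch):

	/* Illustrative only: compose dword0's function field. */
	static uint16_t
	ccp_aes_cbc_encrypt_func(void)
	{
		union ccp_function func;

		func.raw = 0;
		func.aes.type = CCP_AES_TYPE_128;	/* key-size selector */
		func.aes.mode = CCP_AES_MODE_CBC;	/* cipher mode */
		func.aes.encrypt = CCP_CIPHER_DIR_ENCRYPT;
		return func.raw;
	}
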
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index b6f8c48..ad0a670 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -36,6 +36,7 @@
 
 #include "ccp_pmd_private.h"
 #include "ccp_dev.h"
+#include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
@@ -81,6 +82,60 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
 	}
 }
 
+static unsigned
+ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct ccp_session);
+}
+
+static int
+ccp_pmd_session_configure(struct rte_cryptodev *dev,
+			  struct rte_crypto_sym_xform *xform,
+			  struct rte_cryptodev_sym_session *sess,
+			  struct rte_mempool *mempool)
+{
+	int ret;
+	void *sess_private_data;
+
+	if (unlikely(sess == NULL || xform == NULL)) {
+		CCP_LOG_ERR("Invalid session struct or xform");
+		return -ENOMEM;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CCP_LOG_ERR("Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+	ret = ccp_set_session_parameters(sess_private_data, xform);
+	if (ret != 0) {
+		CCP_LOG_ERR("failed configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+	set_session_private_data(sess, dev->driver_id,
+				 sess_private_data);
+
+	return 0;
+}
+
+static void
+ccp_pmd_session_clear(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_session_private_data(sess, index);
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		rte_mempool_put(sess_mp, sess_priv);
+		memset(sess_priv, 0, sizeof(struct ccp_session));
+		set_session_private_data(sess, index, NULL);
+	}
+}
+
 struct rte_cryptodev_ops ccp_ops = {
 		.dev_configure		= ccp_pmd_config,
 		.dev_start		= ccp_pmd_start,
@@ -98,9 +153,9 @@ struct rte_cryptodev_ops ccp_ops = {
 		.queue_pair_stop	= NULL,
 		.queue_pair_count	= NULL,
 
-		.session_get_size	= NULL,
-		.session_configure	= NULL,
-		.session_clear		= NULL,
+		.session_get_size	= ccp_pmd_session_get_size,
+		.session_configure	= ccp_pmd_session_configure,
+		.session_clear		= ccp_pmd_session_clear,
 };
 
 struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
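
For context, the session ops wired up in the patch above are reached
through the standard cryptodev session API. A minimal application-side
sketch (DPDK 18.02-era API; dev_id, sess_mp and xform are placeholders):

	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		rte_exit(EXIT_FAILURE, "session alloc failed");
	/* ends up in ccp_pmd_session_configure() via ccp_ops */
	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_mp) != 0)
		rte_exit(EXIT_FAILURE, "session init failed");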

* [PATCH v4 05/20] crypto/ccp: support queue pair related pmd ops
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (2 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 04/20] crypto/ccp: support session related crypto " Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 06/20] crypto/ccp: support crypto enqueue and dequeue burst api Ravi Kumar
                         ` (16 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_pmd_ops.c | 149 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 144 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index ad0a670..a02aa6f 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -82,6 +82,145 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
 	}
 }
 
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct ccp_qp *qp;
+
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+		rte_ring_free(qp->processed_pkts);
+		rte_mempool_free(qp->batch_mp);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct ccp_qp *qp)
+{
+	unsigned int n = snprintf(qp->name, sizeof(qp->name),
+			"ccp_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+				  unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->size >= ring_size) {
+			CCP_LOG_INFO(
+				"Reusing ring %s for processed packets",
+				qp->name);
+			return r;
+		}
+		CCP_LOG_INFO(
+			"Unable to reuse ring %s for processed packets",
+			 qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		 const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id, struct rte_mempool *session_pool)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+	struct ccp_qp *qp;
+	int retval = 0;
+
+	if (qp_id >= internals->max_nb_qpairs) {
+		CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+			    qp_id, internals->max_nb_qpairs);
+		return (-EINVAL);
+	}
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		ccp_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL) {
+		CCP_LOG_ERR("Failed to allocate queue pair memory");
+		return (-ENOMEM);
+	}
+
+	qp->dev = dev;
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	retval = ccp_pmd_qp_set_unique_name(dev, qp);
+	if (retval) {
+		CCP_LOG_ERR("Failed to create unique name for ccp qp");
+		goto qp_setup_cleanup;
+	}
+
+	qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL) {
+		CCP_LOG_ERR("Failed to create batch info ring");
+		goto qp_setup_cleanup;
+	}
+
+	qp->sess_mp = session_pool;
+
+	/* mempool for batch info */
+	qp->batch_mp = rte_mempool_create(
+				qp->name,
+				qp_conf->nb_descriptors,
+				sizeof(struct ccp_batch_info),
+				RTE_CACHE_LINE_SIZE,
+				0, NULL, NULL, NULL, NULL,
+				SOCKET_ID_ANY, 0);
+	if (qp->batch_mp == NULL)
+		goto qp_setup_cleanup;
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	return 0;
+
+qp_setup_cleanup:
+	dev->data->queue_pairs[qp_id] = NULL;
+	if (qp)
+		rte_free(qp);
+	return -1;
+}
+
+static int
+ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
+		 uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static int
+ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
+		uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
 static unsigned
 ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
 {
@@ -147,11 +286,11 @@ struct rte_cryptodev_ops ccp_ops = {
 
 		.dev_infos_get		= ccp_pmd_info_get,
 
-		.queue_pair_setup	= NULL,
-		.queue_pair_release	= NULL,
-		.queue_pair_start	= NULL,
-		.queue_pair_stop	= NULL,
-		.queue_pair_count	= NULL,
+		.queue_pair_setup	= ccp_pmd_qp_setup,
+		.queue_pair_release	= ccp_pmd_qp_release,
+		.queue_pair_start	= ccp_pmd_qp_start,
+		.queue_pair_stop	= ccp_pmd_qp_stop,
+		.queue_pair_count	= ccp_pmd_qp_count,
 
 		.session_get_size	= ccp_pmd_session_get_size,
 		.session_configure	= ccp_pmd_session_configure,
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
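
The queue-pair ops above are exercised through the usual device
configure/setup sequence; a sketch (the descriptor count, socket id and
sess_mp are illustrative):

	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
	};

	/* calls ccp_pmd_qp_setup(), which allocates the batch-info
	 * mempool and the processed-packet ring shown above */
	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
					   rte_socket_id(), sess_mp) != 0)
		rte_exit(EXIT_FAILURE, "qp setup failed");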

* [PATCH v4 06/20] crypto/ccp: support crypto enqueue and dequeue burst api
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (3 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 05/20] crypto/ccp: support queue pair related " Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 07/20] crypto/ccp: support sessionless operations Ravi Kumar
                         ` (15 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 360 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  35 ++++
 drivers/crypto/ccp/ccp_dev.c     |  27 +++
 drivers/crypto/ccp/ccp_dev.h     |   9 +
 drivers/crypto/ccp/rte_ccp_pmd.c |  64 ++++++-
 5 files changed, 488 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index c365c0f..c17e84f 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -227,3 +227,363 @@ ccp_set_session_parameters(struct ccp_session *sess,
 	}
 	return ret;
 }
+
+/* calculate CCP descriptors requirement */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cipher.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo %d",
+			    session->cipher.algo);
+	}
+	return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->auth.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported auth algo %d",
+			    session->auth.algo);
+	}
+
+	return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->aead_algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo %d",
+			    session->aead_algo);
+	}
+	return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cmd_id) {
+	case CCP_CMD_CIPHER:
+		count = ccp_cipher_slot(session);
+		break;
+	case CCP_CMD_AUTH:
+		count = ccp_auth_slot(session);
+		break;
+	case CCP_CMD_CIPHER_HASH:
+	case CCP_CMD_HASH_CIPHER:
+		count = ccp_cipher_slot(session);
+		count += ccp_auth_slot(session);
+		break;
+	case CCP_CMD_COMBINED:
+		count = ccp_aead_slot(session);
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+
+	}
+
+	return count;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+		  struct ccp_queue *cmd_q __rte_unused,
+		  struct ccp_batch_info *b_info __rte_unused)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+
+	switch (session->cipher.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo %d",
+			    session->cipher.algo);
+		return -ENOTSUP;
+	}
+	return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q __rte_unused,
+		struct ccp_batch_info *b_info __rte_unused)
+{
+
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	switch (session->auth.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported auth algo %d",
+			    session->auth.algo);
+		return -ENOTSUP;
+	}
+
+	return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q __rte_unused,
+		struct ccp_batch_info *b_info __rte_unused)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	switch (session->aead_algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo %d",
+			    session->aead_algo);
+		return -ENOTSUP;
+	}
+	return result;
+}
+
+int
+process_ops_to_enqueue(const struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       struct ccp_queue *cmd_q,
+		       uint16_t nb_ops,
+		       int slots_req)
+{
+	int i, result = 0;
+	struct ccp_batch_info *b_info;
+	struct ccp_session *session;
+
+	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+		CCP_LOG_ERR("batch info allocation failed");
+		return 0;
+	}
+	/* populate batch info necessary for dequeue */
+	b_info->op_idx = 0;
+	b_info->lsb_buf_idx = 0;
+	b_info->desccnt = 0;
+	b_info->cmd_q = cmd_q;
+	b_info->lsb_buf_phys =
+		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+	for (i = 0; i < nb_ops; i++) {
+		session = (struct ccp_session *)get_session_private_data(
+						 op[i]->sym->session,
+						 ccp_cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_AUTH:
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_CIPHER_HASH:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			if (result)
+				break;
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_HASH_CIPHER:
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			if (result)
+				break;
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_COMBINED:
+			result = ccp_crypto_aead(op[i], cmd_q, b_info);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+			result = -1;
+		}
+		if (unlikely(result < 0)) {
+			rte_atomic64_add(&b_info->cmd_q->free_slots,
+					 (slots_req - b_info->desccnt));
+			break;
+		}
+		b_info->op[i] = op[i];
+	}
+
+	b_info->opcnt = i;
+	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+
+	rte_wmb();
+	/* Write the new tail address back to the queue register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      b_info->tail_offset);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+
+	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+	return i;
+}
+
+static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+	struct ccp_session *session;
+	uint8_t *digest_data, *addr;
+	struct rte_mbuf *m_last;
+	int offset, digest_offset;
+	uint8_t digest_le[64];
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	if (session->cmd_id == CCP_CMD_COMBINED) {
+		digest_data = op->sym->aead.digest.data;
+		digest_offset = op->sym->aead.data.offset +
+					op->sym->aead.data.length;
+	} else {
+		digest_data = op->sym->auth.digest.data;
+		digest_offset = op->sym->auth.data.offset +
+					op->sym->auth.data.length;
+	}
+	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+			   m_last->data_len - session->auth.ctx_len);
+
+	rte_mb();
+	offset = session->auth.offset;
+
+	if (session->auth.engine == CCP_ENGINE_SHA)
+		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+			/* All other algorithms require byte
+			 * swap done by host
+			 */
+			unsigned int i;
+
+			offset = session->auth.ctx_len -
+				session->auth.offset - 1;
+			for (i = 0; i < session->auth.digest_length; i++)
+				digest_le[i] = addr[offset - i];
+			offset = 0;
+			addr = digest_le;
+		}
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(addr + offset, digest_data,
+			   session->auth.digest_length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+	} else {
+		if (unlikely(digest_data == NULL))
+			digest_data = rte_pktmbuf_mtod_offset(
+					op->sym->m_dst, uint8_t *,
+					digest_offset);
+		rte_memcpy(digest_data, addr + offset,
+			   session->auth.digest_length);
+	}
+	/* Trim area used for digest from mbuf. */
+	rte_pktmbuf_trim(op->sym->m_src,
+			 session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct rte_crypto_op **op_d,
+		struct ccp_batch_info *b_info,
+		uint16_t nb_ops)
+{
+	int i, min_ops;
+	struct ccp_session *session;
+
+	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+	for (i = 0; i < min_ops; i++) {
+		op_d[i] = b_info->op[b_info->op_idx++];
+		session = (struct ccp_session *)get_session_private_data(
+						 op_d[i]->sym->session,
+						ccp_cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			break;
+		case CCP_CMD_AUTH:
+		case CCP_CMD_CIPHER_HASH:
+		case CCP_CMD_HASH_CIPHER:
+		case CCP_CMD_COMBINED:
+			ccp_auth_dq_prepare(op_d[i]);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+		}
+	}
+
+	b_info->opcnt -= min_ops;
+	return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       uint16_t nb_ops)
+{
+	struct ccp_batch_info *b_info;
+	uint32_t cur_head_offset;
+
+	if (qp->b_info != NULL) {
+		b_info = qp->b_info;
+		if (unlikely(b_info->op_idx > 0))
+			goto success;
+	} else if (rte_ring_dequeue(qp->processed_pkts,
+				    (void **)&b_info))
+		return 0;
+	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+				       CMD_Q_HEAD_LO_BASE);
+
+	if (b_info->head_offset < b_info->tail_offset) {
+		if ((cur_head_offset >= b_info->head_offset) &&
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	} else {
+		if ((cur_head_offset >= b_info->head_offset) ||
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	}
+
+success:
+	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
+	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+	b_info->desccnt = 0;
+	if (b_info->opcnt > 0) {
+		qp->b_info = b_info;
+	} else {
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		qp->b_info = NULL;
+	}
+
+	return nb_ops;
+}
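
The two-branch comparison in process_ops_to_dequeue() above is a
wrap-around test on the circular descriptor queue: a batch is complete
only once the hardware head pointer has left the
[head_offset, tail_offset) window. Reduced to a standalone predicate
(illustrative):

	/* true while 'cur' still lies inside the submitted batch
	 * [head, tail) of a circular queue, i.e. work is pending */
	static inline int
	ccp_batch_pending(uint32_t head, uint32_t tail, uint32_t cur)
	{
		if (head < tail)	/* batch does not wrap */
			return cur >= head && cur < tail;
		return cur >= head || cur < tail;	/* batch wraps */
	}
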
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 346d5ee..4455497 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -264,4 +264,39 @@ struct ccp_qp;
 int ccp_set_session_parameters(struct ccp_session *sess,
 			       const struct rte_crypto_sym_xform *xform);
 
+/**
+ * Compute the number of CCP descriptor slots a session's ops require
+ *
+ * @param session CCP private session
+ * @return count of descriptor slots required
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * process crypto ops to be enqueued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops no. of ops to be submitted
+ * @param slots_req number of CCP descriptor slots required
+ * @return number of crypto ops successfully enqueued
+ */
+int process_ops_to_enqueue(const struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   struct ccp_queue *cmd_q,
+			   uint16_t nb_ops,
+			   int slots_req);
+
+/**
+ * process crypto ops to be dequeued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops requested no. of ops
+ * @return number of crypto ops dequeued
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   uint16_t nb_ops);
+
 #endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 57bccf4..fee90e3 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -61,6 +61,33 @@ ccp_dev_start(struct rte_cryptodev *dev)
 	return 0;
 }
 
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+	int i, ret = 0;
+	struct ccp_device *dev;
+	struct ccp_private *priv = cdev->data->dev_private;
+
+	dev = TAILQ_NEXT(priv->last_dev, next);
+	if (unlikely(dev == NULL))
+		dev = TAILQ_FIRST(&ccp_list);
+	priv->last_dev = dev;
+	if (dev->qidx >= dev->cmd_q_count)
+		dev->qidx = 0;
+	ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+	if (ret >= slot_req)
+		return &dev->cmd_q[dev->qidx];
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->qidx++;
+		if (dev->qidx >= dev->cmd_q_count)
+			dev->qidx = 0;
+		ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+		if (ret >= slot_req)
+			return &dev->cmd_q[dev->qidx];
+	}
+	return NULL;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index a16ba81..cfb3b03 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -445,4 +445,13 @@ int ccp_dev_start(struct rte_cryptodev *dev);
  */
 int ccp_probe_devices(const struct rte_pci_id *ccp_id);
 
+/**
+ * allot a CCP command queue with enough free descriptor slots
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of descriptor slots required
+ * @return allotted CCP queue on success, otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
 #endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index cc35a97..ed6ca5d 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -38,6 +38,7 @@
 #include <rte_dev.h>
 #include <rte_malloc.h>
 
+#include "ccp_crypto.h"
 #include "ccp_dev.h"
 #include "ccp_pmd_private.h"
 
@@ -47,23 +48,72 @@
 static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
+{
+	struct ccp_session *sess = NULL;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (unlikely(op->sym->session == NULL))
+			return NULL;
+
+		sess = (struct ccp_session *)
+			get_session_private_data(
+				op->sym->session,
+				ccp_cryptodev_driver_id);
+	}
+
+	return sess;
+}
+
 static uint16_t
-ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
-		      struct rte_crypto_op **ops __rte_unused,
-		      uint16_t nb_ops __rte_unused)
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		      uint16_t nb_ops)
 {
-	uint16_t enq_cnt = 0;
+	struct ccp_session *sess = NULL;
+	struct ccp_qp *qp = queue_pair;
+	struct ccp_queue *cmd_q;
+	struct rte_cryptodev *dev = qp->dev;
+	uint16_t i, enq_cnt = 0, slots_req = 0;
+
+	if (nb_ops == 0)
+		return 0;
+
+	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+		return 0;
+
+	for (i = 0; i < nb_ops; i++) {
+		sess = get_ccp_session(qp, ops[i]);
+		if (unlikely(sess == NULL) && (i == 0)) {
+			qp->qp_stats.enqueue_err_count++;
+			return 0;
+		} else if (sess == NULL) {
+			nb_ops = i;
+			break;
+		}
+		slots_req += ccp_compute_slot_count(sess);
+	}
+
+	cmd_q = ccp_allot_queue(dev, slots_req);
+	if (unlikely(cmd_q == NULL))
+		return 0;
 
+	enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+	qp->qp_stats.enqueued_count += enq_cnt;
 	return enq_cnt;
 }
 
 static uint16_t
-ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
-		      struct rte_crypto_op **ops __rte_unused,
-		      uint16_t nb_ops __rte_unused)
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
+	struct ccp_qp *qp = queue_pair;
 	uint16_t nb_dequeued = 0;
 
+	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
 	return nb_dequeued;
 }
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
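
At runtime the burst functions above sit behind the generic cryptodev
calls; a typical polling loop looks like this (dev_id, queue 0, BURST_SZ
and the op arrays are placeholders):

	uint16_t n_enq, n_deq;
	struct rte_crypto_op *deq_ops[BURST_SZ];

	n_enq = rte_cryptodev_enqueue_burst(dev_id, 0, enq_ops, n_ops);
	do {
		n_deq = rte_cryptodev_dequeue_burst(dev_id, 0,
						    deq_ops, BURST_SZ);
	} while (n_deq == 0);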

* [PATCH v4 07/20] crypto/ccp: support sessionless operations
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (4 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 06/20] crypto/ccp: support crypto enqueue and dequeue burst api Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 08/20] crypto/ccp: support stats related crypto pmd ops Ravi Kumar
                         ` (14 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/rte_ccp_pmd.c | 33 +++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index ed6ca5d..23d3af3 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -49,7 +49,7 @@ static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
 static struct ccp_session *
-get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
 {
 	struct ccp_session *sess = NULL;
 
@@ -61,6 +61,27 @@ get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
 			get_session_private_data(
 				op->sym->session,
 				ccp_cryptodev_driver_id);
+	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		void *_sess;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, &_sess))
+			return NULL;
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+		sess = (struct ccp_session *)_sess_private_data;
+
+		if (unlikely(ccp_set_session_parameters(sess,
+							op->sym->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
+			return NULL;
+		}
+		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_session_private_data(op->sym->session,
+					 ccp_cryptodev_driver_id,
+					 _sess_private_data);
 	}
 
 	return sess;
@@ -108,10 +129,18 @@ ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	struct ccp_qp *qp = queue_pair;
-	uint16_t nb_dequeued = 0;
+	uint16_t nb_dequeued = 0, i;
 
 	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
 
+	/* Free session if a session-less crypto op */
+	for (i = 0; i < nb_dequeued; i++)
+		if (unlikely(ops[i]->sess_type ==
+			     RTE_CRYPTO_OP_SESSIONLESS)) {
+			rte_mempool_put(qp->sess_mp,
+					ops[i]->sym->session);
+			ops[i]->sym->session = NULL;
+		}
 	qp->qp_stats.dequeued_count += nb_dequeued;
 
 	return nb_dequeued;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
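
A sessionless request simply carries the xform chain on the op and lets
the PMD cut a transient session from qp->sess_mp, as added above.
Sketched from the application side (op_mp, xform and mbuf are
placeholders):

	struct rte_crypto_op *op;

	op = rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		rte_exit(EXIT_FAILURE, "op alloc failed");
	/* no session attached: sess_type stays RTE_CRYPTO_OP_SESSIONLESS
	 * and get_ccp_session() builds one from op->sym->xform */
	op->sym->xform = &xform;
	op->sym->m_src = mbuf;
	/* the transient session is released again at dequeue time */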

* [PATCH v4 08/20] crypto/ccp: support stats related crypto pmd ops
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (5 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 07/20] crypto/ccp: support sessionless operations Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 09/20] crypto/ccp: support ccp hwrng feature Ravi Kumar
                         ` (13 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_pmd_ops.c | 34 ++++++++++++++++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index a02aa6f..d483a74 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -68,6 +68,36 @@ ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
 }
 
 static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+		  struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+static void
 ccp_pmd_info_get(struct rte_cryptodev *dev,
 		 struct rte_cryptodev_info *dev_info)
 {
@@ -281,8 +311,8 @@ struct rte_cryptodev_ops ccp_ops = {
 		.dev_stop		= ccp_pmd_stop,
 		.dev_close		= ccp_pmd_close,
 
-		.stats_get		= NULL,
-		.stats_reset		= NULL,
+		.stats_get		= ccp_pmd_stats_get,
+		.stats_reset		= ccp_pmd_stats_reset,
 
 		.dev_infos_get		= ccp_pmd_info_get,
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
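
These two hooks back the generic stats calls; for example:

	#include <inttypes.h>

	struct rte_cryptodev_stats stats;

	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
		printf("enq %" PRIu64 " deq %" PRIu64 " enq_err %" PRIu64 "\n",
		       stats.enqueued_count, stats.dequeued_count,
		       stats.enqueue_err_count);
	rte_cryptodev_stats_reset(dev_id);	/* zeroes every qp's counters */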

* [PATCH v4 09/20] crypto/ccp: support ccp hwrng feature
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (6 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 08/20] crypto/ccp: support stats related crypto pmd ops Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 10/20] crypto/ccp: support aes cipher algo Ravi Kumar
                         ` (12 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_dev.c | 20 ++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h | 11 +++++++++++
 2 files changed, 31 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index fee90e3..d8c0ab4 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -88,6 +88,26 @@ ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
 	return NULL;
 }
 
+int
+ccp_read_hwrng(uint32_t *value)
+{
+	struct ccp_device *dev;
+
+	TAILQ_FOREACH(dev, &ccp_list, next) {
+		void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+		while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+			*value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+			if (*value) {
+				dev->hwrng_retries = 0;
+				return 0;
+			}
+		}
+		dev->hwrng_retries = 0;
+	}
+	return -1;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index cfb3b03..a5c9ef3 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -47,6 +47,7 @@
 
 /**< CCP specific */
 #define MAX_HW_QUEUES                   5
+#define CCP_MAX_TRNG_RETRIES		10
 
 /**< CCP Register Mappings */
 #define Q_MASK_REG                      0x000
@@ -223,6 +224,8 @@ struct ccp_device {
 	/**< protection for shared lsb region allocation */
 	int qidx;
 	/**< current queue index */
+	int hwrng_retries;
+	/**< retry counter for CCP TRNG */
 } __rte_cache_aligned;
 
 /**< CCP H/W engine related */
@@ -454,4 +457,12 @@ int ccp_probe_devices(const struct rte_pci_id *ccp_id);
  */
 struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
 
+/**
+ * read hwrng value
+ *
+ * @param trng_value pointer to which the 32-bit RNG value is written
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
 #endif /* _CCP_DEV_H_ */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
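
ccp_read_hwrng() polls TRNG_OUT_REG on each probed device until a
non-zero word appears or the retry budget runs out; a caller might fall
back to a software source when it fails (illustrative, seed is a
placeholder):

	uint32_t rnd;

	if (ccp_read_hwrng(&rnd) == 0)
		seed ^= rnd;			/* 32 bits of HW entropy */
	else
		seed ^= (uint32_t)rte_rdtsc();	/* all retries exhausted */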

* [PATCH v4 10/20] crypto/ccp: support aes cipher algo
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (7 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 09/20] crypto/ccp: support ccp hwrng feature Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 11/20] crypto/ccp: support 3des " Ravi Kumar
                         ` (11 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 197 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h  |  13 +++
 drivers/crypto/ccp/ccp_dev.h     |  53 +++++++++++
 drivers/crypto/ccp/ccp_pmd_ops.c |  60 ++++++++++++
 4 files changed, 321 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index c17e84f..b097355 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -80,6 +80,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			     const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	size_t i;
 
 	cipher_xform = &xform->cipher;
 
@@ -99,6 +100,21 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 	sess->iv.length = cipher_xform->iv.length;
 
 	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo");
 		return -1;
@@ -106,10 +122,27 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 
 
 	switch (sess->cipher.engine) {
+	case CCP_ENGINE_AES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (i = 0; i < sess->cipher.key_length ; i++)
+			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+				sess->cipher.key[i];
+		break;
 	default:
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
 	}
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
 	return 0;
 }
 
@@ -235,6 +268,18 @@ ccp_cipher_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		count = 1;
+		/**< only op */
+		break;
+	case CCP_CIPHER_ALGO_AES_CTR:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
@@ -297,10 +342,146 @@ ccp_compute_slot_count(struct ccp_session *session)
 	return count;
 }
 
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_desc *desc;
+	union ccp_function function;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 0;
+	CCP_CMD_EOM(desc) = 0;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+	CCP_PT_BITWISE(&function) = pst->bit_mod;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = pst->len;
+
+	if (pst->dir) {
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = 0;
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+	} else {
+
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = 0;
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+	}
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
+
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *lsb_buf;
+	struct ccp_passthru pst = {0};
+	struct ccp_desc *desc;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	uint8_t *iv;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+	function.raw = 0;
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
+		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
+				   iv, session->iv.length);
+			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+			CCP_AES_SIZE(&function) = 0x1F;
+		} else {
+			lsb_buf =
+			&(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+			rte_memcpy(lsb_buf +
+				   (CCP_SB_BYTES - session->iv.length),
+				   iv, session->iv.length);
+			pst.src_addr = b_info->lsb_buf_phys +
+				(b_info->lsb_buf_idx * CCP_SB_BYTES);
+			b_info->lsb_buf_idx++;
+		}
+
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (likely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+	key_addr = session->cipher.key_phys;
+
+	/* prepare desc for aes command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
-		  struct ccp_queue *cmd_q __rte_unused,
-		  struct ccp_batch_info *b_info __rte_unused)
+		  struct ccp_queue *cmd_q,
+		  struct ccp_batch_info *b_info)
 {
 	int result = 0;
 	struct ccp_session *session;
@@ -310,6 +491,18 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 					 ccp_cryptodev_driver_id);
 
 	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	case CCP_CIPHER_ALGO_AES_CTR:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 1;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 4455497..614cd47 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -46,7 +46,20 @@
 
 #include "ccp_dev.h"
 
+#define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
 #define CCP_SHA3_CTX_SIZE 200
+
+/** Macro helpers for CCP command creation */
+#define	CCP_AES_SIZE(p)		((p)->aes.size)
+#define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
+#define	CCP_AES_MODE(p)		((p)->aes.mode)
+#define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
+#define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index a5c9ef3..759afc1 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -48,6 +48,7 @@
 /**< CCP specific */
 #define MAX_HW_QUEUES                   5
 #define CCP_MAX_TRNG_RETRIES		10
+#define CCP_ALIGN(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
 
 /**< CCP Register Mappings */
 #define Q_MASK_REG                      0x000
@@ -104,10 +105,52 @@
 #define LSB_SIZE                        16
 #define LSB_ITEM_SIZE                   32
 #define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR)      (LSB_ADDR / LSB_ITEM_SIZE)
 
 /* General CCP Defines */
 
 #define CCP_SB_BYTES                    32
+/* Word 0 */
+#define CCP_CMD_DW0(p)		((p)->dw0)
+#define CCP_CMD_SOC(p)		(CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p)		(CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p)	        (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p)		(CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p)	(CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p)	(CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p)	        (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p)		((p)->length)
+#define CCP_CMD_LEN(p)		(CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p)		((p)->src_lo)
+#define CCP_CMD_SRC_LO(p)	(CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p)		((p)->dw3)
+#define CCP_CMD_SRC_MEM(p)	((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p)	((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p)	((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p)	((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p)		((p)->dw4)
+#define CCP_CMD_DST_LO(p)	(CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p)		((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p)	(CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p)	((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p)	((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p)	((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p)	((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p)		((p)->key_lo)
+#define CCP_CMD_KEY_LO(p)	(CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p)		((p)->dw7)
+#define CCP_CMD_KEY_HI(p)	((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p)	((p)->dw7.key_mem)
 
 /* bitmap */
 enum {
@@ -412,6 +455,16 @@ struct ccp_desc {
 };
 
 /**
+ * ccp memory type
+ */
+enum ccp_memtype {
+	CCP_MEMTYPE_SYSTEM = 0,
+	CCP_MEMTYPE_SB,
+	CCP_MEMTYPE_LOCAL,
+	CCP_MEMTYPE_LAST,
+};
+
+/**
  * cmd id to follow order
  */
 enum ccp_cmd_order {
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index d483a74..5f56242 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,66 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{       /* AES ECB */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_ECB,
+				.block_size = 16,
+				.key_size = {
+				   .min = 16,
+				   .max = 32,
+				   .increment = 8
+				},
+				.iv_size = {
+				   .min = 0,
+				   .max = 0,
+				   .increment = 0
+				}
+			}, }
+		}, }
+	},
+	{       /* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
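
One detail worth noting in the AES patch above: the engine expects key
bytes in big-endian order, so ccp_configure_session_cipher() stores a
byte-reversed copy in key_ccp. The same transform as a standalone helper
(illustrative):

	/* CCP consumes the AES key byte-reversed relative to the
	 * byte array supplied through the rte_crypto cipher xform */
	static void
	ccp_swap_key(uint8_t *dst, const uint8_t *src, size_t len)
	{
		size_t i;

		for (i = 0; i < len; i++)
			dst[len - i - 1] = src[i];
	}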

* [PATCH v4 11/20] crypto/ccp: support 3des cipher algo
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (8 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 10/20] crypto/ccp: support aes cipher algo Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 12/20] crypto/ccp: support aes-cmac auth algo Ravi Kumar
                         ` (10 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 132 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h  |   3 +
 drivers/crypto/ccp/ccp_pmd_ops.c |  20 ++++++
 3 files changed, 154 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index b097355..0660761 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -80,7 +80,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			     const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
-	size_t i;
+	size_t i, j, x;
 
 	cipher_xform = &xform->cipher;
 
@@ -115,6 +115,11 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
 		sess->cipher.engine = CCP_ENGINE_AES;
 		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+		sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_3DES;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo");
 		return -1;
@@ -137,6 +142,20 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
 				sess->cipher.key[i];
 		break;
+	case CCP_ENGINE_3DES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+			for (i = 0; i < 8; i++)
+				sess->cipher.key_ccp[(8 + x) - i - 1] =
+					sess->cipher.key[i + x];
+		break;
 	default:
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
@@ -280,6 +299,10 @@ ccp_cipher_slot(struct ccp_session *session)
 		count = 2;
 		/**< op + passthrough for iv */
 		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
@@ -478,6 +501,109 @@ ccp_perform_aes(struct rte_crypto_op *op,
 	return 0;
 }
 
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	unsigned char *lsb_buf;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *iv;
+	phys_addr_t src_addr, dest_addr, key_addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	switch (session->cipher.um.des_mode) {
+	case CCP_DES_MODE_CBC:
+		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+		b_info->lsb_buf_idx++;
+
+		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+			   iv, session->iv.length);
+
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+		break;
+	case CCP_DES_MODE_CFB:
+	case CCP_DES_MODE_ECB:
+		CCP_LOG_ERR("Unsupported DES cipher mode");
+		return -ENOTSUP;
+	}
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr =
+			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						   op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+
+	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for des command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.des_mode)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	/* Write the new tail address back to the queue register */
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
 		  struct ccp_queue *cmd_q,
@@ -503,6 +629,10 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 		result = ccp_perform_aes(op, cmd_q, b_info);
 		b_info->desccnt += 1;
 		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		result = ccp_perform_3des(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
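
The key-setup loop added in ccp_configure_session_cipher() above lays the
3DES key out in the byte order the CCP engine expects: each 8-byte DES word
of the user key is stored byte-reversed. A standalone sketch with the same
indexing (hypothetical names, not part of the patch):

#include <stddef.h>
#include <stdint.h>

static void
ccp_3des_key_swap(uint8_t *ccp_key, const uint8_t *user_key, size_t key_len)
{
	size_t w, i;

	/* key_len is 16 or 24; walk the key one 8-byte DES word at a time */
	for (w = 0; w < key_len / 8; w++)
		for (i = 0; i < 8; i++)
			/* reverse the bytes within each word */
			ccp_key[w * 8 + (7 - i)] = user_key[w * 8 + i];
}
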
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 614cd47..d528ec9 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -57,6 +57,9 @@
 #define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
 #define	CCP_AES_MODE(p)		((p)->aes.mode)
 #define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_DES_ENCRYPT(p)	((p)->des.encrypt)
+#define	CCP_DES_MODE(p)		((p)->des.mode)
+#define	CCP_DES_TYPE(p)		((p)->des.type)
 #define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
 #define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
 
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 5f56242..3a16be8 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -99,6 +99,26 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 24,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v4 12/20] crypto/ccp: support aes-cmac auth algo
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (9 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 11/20] crypto/ccp: support 3des " Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 13/20] crypto/ccp: support aes-gcm aead algo Ravi Kumar
                         ` (9 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 277 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_pmd_ops.c |  20 +++
 2 files changed, 295 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 0660761..6e593d8 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -36,6 +36,8 @@
 #include <sys/queue.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <openssl/cmac.h> /* subkey APIs */
+#include <openssl/evp.h> /* subkey APIs */
 
 #include <rte_hexdump.h>
 #include <rte_memzone.h>
@@ -74,6 +76,84 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 	return res;
 }
 
+/* prepare temporary keys K1 and K2 */
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+	int i;
+	/* Shift block to left, including carry */
+	for (i = 0; i < bl; i++) {
+		k[i] = l[i] << 1;
+		if (i < bl - 1 && l[i + 1] & 0x80)
+			k[i] |= 1;
+	}
+	/* If MSB set fixup with R */
+	if (l[0] & 0x80)
+		k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/* subkeys K1 and K2 generation for CMAC */
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+	const EVP_CIPHER *algo;
+	EVP_CIPHER_CTX *ctx;
+	unsigned char *ccp_ctx;
+	size_t i;
+	int dstlen, totlen;
+	unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+	unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+	unsigned char k1[AES_BLOCK_SIZE] = {0};
+	unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+	if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+		algo =  EVP_aes_128_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+		algo =  EVP_aes_192_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+		algo =  EVP_aes_256_cbc();
+	else {
+		CCP_LOG_ERR("Invalid CMAC key length");
+		return -1;
+	}
+
+	ctx = EVP_CIPHER_CTX_new();
+	if (!ctx) {
+		CCP_LOG_ERR("ctx creation failed");
+		return -1;
+	}
+	if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+			    (unsigned char *)zero_iv) <= 0)
+		goto key_generate_err;
+	if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+			      AES_BLOCK_SIZE) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+		goto key_generate_err;
+
+	memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+	prepare_key(k1, dst, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k1[i];
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+				   (2 * CCP_SB_BYTES) - 1);
+	prepare_key(k2, k1, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k2[i];
+
+	EVP_CIPHER_CTX_free(ctx);
+
+	return 0;
+
+key_generate_err:
+	CCP_LOG_ERR("CMAC Init failed");
+	return -1;
+}
+
 /* configure session */
 static int
 ccp_configure_session_cipher(struct ccp_session *sess,
@@ -170,6 +250,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_auth_xform *auth_xform = NULL;
+	size_t i;
 
 	auth_xform = &xform->auth;
 
@@ -179,6 +260,33 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+		sess->auth.key_length = auth_xform->key.length;
+		/**< padding and hash result */
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = AES_BLOCK_SIZE;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		if (sess->auth.key_length == 16)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->auth.key_length == 24)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->auth.key_length == 32)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid CMAC key length");
+			return -1;
+		}
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   sess->auth.key_length);
+		for (i = 0; i < sess->auth.key_length; i++)
+			sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+				sess->auth.key[i];
+		if (generate_cmac_subkeys(sess))
+			return -1;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported hash algo");
 		return -ENOTSUP;
@@ -316,6 +424,15 @@ ccp_auth_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_CMAC:
+		count = 4;
+		/**
+		 * op
+		 * extra descriptor in padding case
+		 * (k1/k2(255:128) with iv(127:0))
+		 * Retrieve result
+		 */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported auth algo %d",
 			    session->auth.algo);
@@ -415,6 +532,158 @@ ccp_perform_passthru(struct ccp_passthru *pst,
 }
 
 static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *src_tb, *append_ptr, *ctx_addr;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	int length, non_align_len;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+	CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
+	if (op->sym->auth.data.length % session->auth.block_size == 0) {
+
+		ctx_addr = session->auth.pre_compute;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		/* prepare desc for aes-cmac command */
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	} else {
+		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+		length *= AES_BLOCK_SIZE;
+		non_align_len = op->sym->auth.data.length - length;
+		/* prepare desc for aes-cmac command */
+		/* Command 1 */
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_INIT(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		/* Command 2 */
+		append_ptr = append_ptr + CCP_SB_BYTES;
+		memset(append_ptr, 0, AES_BLOCK_SIZE);
+		src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+						 uint8_t *,
+						 op->sym->auth.data.offset +
+						 length);
+		rte_memcpy(append_ptr, src_tb, non_align_len);
+		append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+		CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+		CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	}
+	/* Retrieve result */
+	pst.dest_addr = dest_addr;
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
 ccp_perform_aes(struct rte_crypto_op *op,
 		struct ccp_queue *cmd_q,
 		struct ccp_batch_info *b_info)
@@ -643,8 +912,8 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 
 static inline int
 ccp_crypto_auth(struct rte_crypto_op *op,
-		struct ccp_queue *cmd_q __rte_unused,
-		struct ccp_batch_info *b_info __rte_unused)
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
 {
 
 	int result = 0;
@@ -655,6 +924,10 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 					ccp_cryptodev_driver_id);
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_CMAC:
+		result = ccp_perform_aes_cmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported auth algo %d",
 			    session->auth.algo);
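
The prepare_key()/generate_cmac_subkeys() pair added above follows the NIST
SP 800-38B subkey schedule: L = AES-K(0^128) is computed with OpenSSL, then
K1 = dbl(L) and K2 = dbl(K1), where dbl() is a one-bit left shift with a
conditional XOR of the constant Rb into the last byte. A self-contained
sketch of dbl() for 16-byte blocks (hypothetical name, same logic as
prepare_key()):

#include <stdint.h>

static void
cmac_dbl(uint8_t k[16], const uint8_t l[16])
{
	int i;

	for (i = 0; i < 16; i++) {
		k[i] = (uint8_t)(l[i] << 1);
		if (i < 15 && (l[i + 1] & 0x80))
			k[i] |= 1;	/* carry in from the next byte */
	}
	if (l[0] & 0x80)
		k[15] ^= 0x87;		/* Rb for 128-bit blocks */
}

The patch then stores K1 and K2 byte-reversed in the precompute area so the
engine can load them via passthrough, and pads a short final block with
CMAC_PAD_VALUE, the leading-one byte of the standard 10* padding, before
issuing the second AES command.
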
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 3a16be8..1fb6a6d 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,26 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{	/* AES-CMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+			}, }
+		}, }
+	},
 	{       /* AES ECB */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v4 13/20] crypto/ccp: support aes-gcm aead algo
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (10 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 12/20] crypto/ccp: support aes-cmac auth algo Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 14/20] crypto/ccp: support sha1 authentication algo Ravi Kumar
                         ` (8 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 235 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_pmd_ops.c |  30 +++++
 2 files changed, 261 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 6e593d8..ad9fa8e 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -299,6 +299,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
 			   const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_aead_xform *aead_xform = NULL;
+	size_t i;
 
 	aead_xform = &xform->aead;
 
@@ -313,6 +314,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
 		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	}
+	sess->aead_algo = aead_xform->algo;
 	sess->auth.aad_length = aead_xform->aad_length;
 	sess->auth.digest_length = aead_xform->digest_length;
 
@@ -321,10 +323,37 @@ ccp_configure_session_aead(struct ccp_session *sess,
 	sess->iv.length = aead_xform->iv.length;
 
 	switch (aead_xform->algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid aead key length");
+			return -1;
+		}
+		for (i = 0; i < sess->cipher.key_length; i++)
+			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+				sess->cipher.key[i];
+		sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = 0;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		sess->cmd_id = CCP_CMD_COMBINED;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo");
 		return -ENOTSUP;
 	}
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
 	return 0;
 }
 
@@ -447,10 +476,27 @@ ccp_aead_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->aead_algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo %d",
 			    session->aead_algo);
 	}
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		count = 5;
+		/**
+		 * 1. Passthru iv
+		 * 2. Hash AAD
+		 * 3. GCTR
+		 * 4. Reload passthru
+		 * 5. Hash Final tag
+		 */
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+			    session->auth.algo);
+	}
 	return count;
 }
 
@@ -873,6 +919,179 @@ ccp_perform_3des(struct rte_crypto_op *op,
 	return 0;
 }
 
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *iv;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint64_t *temp;
+	phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+	phys_addr_t digest_dest_addr;
+	int length, non_align_len;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	key_addr = session->cipher.key_phys;
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->aead.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->aead.data.offset);
+	else
+		dest_addr = src_addr;
+	rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
+	digest_dest_addr = op->sym->aead.digest.phys_addr;
+	temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
+	*temp++ = rte_bswap64(session->auth.aad_length << 3);
+	*temp = rte_bswap64(op->sym->aead.data.length << 3);
+
+	non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
+	length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
+
+	aad_addr = op->sym->aead.aad.phys_addr;
+
+	/* CMD1 IV Passthru */
+	rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
+		   session->iv.length);
+	pst.src_addr = session->cipher.nonce_phys;
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD2 GHASH-AAD */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD3 : GCTR Plain text */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	if (non_align_len == 0)
+		CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+	else
+		CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD4 : PT to copy IV */
+	pst.src_addr = session->cipher.nonce_phys;
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = AES_BLOCK_SIZE;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD5 : GHASH-Final */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	/* Last block (AAD_len || PT_len) */
+	CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
+	CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
 		  struct ccp_queue *cmd_q,
@@ -939,17 +1158,25 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 
 static inline int
 ccp_crypto_aead(struct rte_crypto_op *op,
-		struct ccp_queue *cmd_q __rte_unused,
-		struct ccp_batch_info *b_info __rte_unused)
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
 {
 	int result = 0;
 	struct ccp_session *session;
 
 	session = (struct ccp_session *)get_session_private_data(
-					 op->sym->session,
+					op->sym->session,
 					ccp_cryptodev_driver_id);
 
-	switch (session->aead_algo) {
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+			CCP_LOG_ERR("Incorrect chain order");
+			return -1;
+		}
+		result = ccp_perform_aes_gcm(op, cmd_q);
+		b_info->desccnt += 5;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo %d",
 			    session->aead_algo);
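
One detail in ccp_perform_aes_gcm() above is the pair of rte_bswap64()
stores right after the digest area: they build the final GHASH block,
len(AAD) || len(C) in bits, big-endian. An equivalent standalone sketch
(hypothetical name and buffer):

#include <stdint.h>
#include <rte_byteorder.h>

static void
ghash_len_block(uint8_t block[16], uint64_t aad_len, uint64_t text_len)
{
	uint64_t *p = (uint64_t *)(void *)block;

	p[0] = rte_bswap64(aad_len << 3);	/* AAD length in bits */
	p[1] = rte_bswap64(text_len << 3);	/* text length in bits */
}
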
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 1fb6a6d..24f577a 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -139,6 +139,36 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{       /* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				 .algo = RTE_CRYPTO_AEAD_AES_GCM,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = {
+					 .min = 0,
+					 .max = 65535,
+					 .increment = 1
+				 },
+				 .iv_size = {
+					 .min = 12,
+					 .max = 16,
+					 .increment = 4
+				 },
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
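
A matching application-side AEAD transform for the AES GCM entry above
might look like this minimal sketch (hypothetical helper; IV_OFFSET is the
same placement assumption as in the earlier AES-CTR sketch):

#include <string.h>
#include <rte_cryptodev.h>

#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static void
fill_aes_gcm_xform(struct rte_crypto_sym_xform *xform,
		   uint8_t *key, uint16_t key_len, uint16_t aad_len)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.key.data = key;
	xform->aead.key.length = key_len;	/* 16, 24 or 32 bytes */
	xform->aead.iv.offset = IV_OFFSET;
	xform->aead.iv.length = 12;		/* standard GCM nonce */
	xform->aead.digest_length = 16;
	xform->aead.aad_length = aad_len;
}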
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v4 14/20] crypto/ccp: support sha1 authentication algo
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (11 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 13/20] crypto/ccp: support aes-gcm aead algo Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 15/20] crypto/ccp: support sha2 family " Ravi Kumar
                         ` (7 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 367 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  23 +++
 drivers/crypto/ccp/ccp_pmd_ops.c |  42 +++++
 3 files changed, 432 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index ad9fa8e..c575dc1 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -36,6 +36,7 @@
 #include <sys/queue.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <openssl/sha.h>
 #include <openssl/cmac.h> /* subkey APIs */
 #include <openssl/evp.h> /* subkey APIs */
 
@@ -52,6 +53,14 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA1_H4, SHA1_H3,
+	SHA1_H2, SHA1_H1,
+	SHA1_H0, 0x0U,
+	0x0U, 0x0U,
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -76,6 +85,59 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 	return res;
 }
 
+/* partial hash using openssl */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA_CTX ctx;
+
+	if (!SHA1_Init(&ctx))
+		return -EFAULT;
+	SHA1_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+	return 0;
+}
+
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+	uint8_t ipad[sess->auth.block_size];
+	uint8_t	opad[sess->auth.block_size];
+	uint8_t *ipad_t, *opad_t;
+	uint32_t *hash_value_be32, hash_temp32[8];
+	int i, count;
+
+	opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+	hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+
+	/* the key is zero-padded to the block size, so XOR across the full block */
+	for (i = 0; i < sess->auth.block_size; i++) {
+		ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+		opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+	}
+
+	switch (sess->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		count = SHA1_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	default:
+		CCP_LOG_ERR("Invalid auth algo");
+		return -1;
+	}
+}
+
 /* prepare temporary keys K1 and K2 */
 static void prepare_key(unsigned char *k, unsigned char *l, int bl)
 {
@@ -260,6 +322,31 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1:
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx = (void *)ccp_sha1_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -453,6 +540,13 @@ ccp_auth_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+		count = 3;
+		/**< op + LSB passthrough copy to/from */
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		count = 6;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -578,6 +672,271 @@ ccp_perform_passthru(struct ccp_passthru *pst,
 }
 
 static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, dest_addr_t;
+	struct ccp_passthru pst;
+	uint64_t auth_msg_bits;
+	void *append_ptr;
+	uint8_t *addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+	addr = session->auth.pre_compute;
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	dest_addr_t = dest_addr;
+
+	/** Load PHash1 to LSB */
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/** SHA engine command descriptor for intermediate hash */
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+	auth_msg_bits = (op->sym->auth.data.length +
+			 session->auth.block_size) * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Intermediate Hash value retrieve */
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = session->auth.ctx_len;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	}
+
+	/** Load PHash2 to LSB */
+	addr += session->auth.ctx_len;
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/** SHA engine command descriptor for final hash */
+	dest_addr_t += session->auth.offset;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+			     session->auth.offset);
+	auth_msg_bits = (session->auth.block_size +
+			 session->auth.ctx_len -
+			 session->auth.offset) * 8;
+
+	CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Retrieve hmac output */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+
+}
+
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr;
+	struct ccp_passthru pst;
+	void *append_ptr;
+	uint64_t auth_msg_bits;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+
+	/** Passthru SHA context */
+
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+						     session->auth.ctx);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/** Prepare SHA command descriptor */
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+	auth_msg_bits = op->sym->auth.data.length * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Hash value retrieve */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+
+}
+
+static int
 ccp_perform_aes_cmac(struct rte_crypto_op *op,
 		     struct ccp_queue *cmd_q)
 {
@@ -1143,6 +1502,14 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 					ccp_cryptodev_driver_id);
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+		result = ccp_perform_sha(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 6;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
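
The HMAC path added above uses the classic precompute: hash one block of
(key ^ ipad) and one block of (key ^ opad) once at session setup, then seed
the engine with those intermediate states (PHash1/PHash2). A plain sketch
of that precompute with OpenSSL, without the CCP's word-reversed storage
(hypothetical name; the key is assumed already zero-padded to one block):

#include <string.h>
#include <openssl/sha.h>

static int
hmac_sha1_precompute(const uint8_t key[SHA_CBLOCK],
		     uint8_t ipad_state[SHA_DIGEST_LENGTH],
		     uint8_t opad_state[SHA_DIGEST_LENGTH])
{
	uint8_t pad[SHA_CBLOCK];
	SHA_CTX ctx;
	int i;

	for (i = 0; i < SHA_CBLOCK; i++)
		pad[i] = key[i] ^ 0x36;		/* HMAC_IPAD_VALUE */
	if (!SHA1_Init(&ctx))
		return -1;
	SHA1_Transform(&ctx, pad);	/* one compression, no length padding */
	memcpy(ipad_state, &ctx, SHA_DIGEST_LENGTH);	/* h0..h4 of the state */

	for (i = 0; i < SHA_CBLOCK; i++)
		pad[i] = key[i] ^ 0x5c;		/* HMAC_OPAD_VALUE */
	if (!SHA1_Init(&ctx))
		return -1;
	SHA1_Transform(&ctx, pad);
	memcpy(opad_state, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}
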
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index d528ec9..42179de 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -60,9 +60,32 @@
 #define	CCP_DES_ENCRYPT(p)	((p)->des.encrypt)
 #define	CCP_DES_MODE(p)		((p)->des.mode)
 #define	CCP_DES_TYPE(p)		((p)->des.type)
+#define	CCP_SHA_TYPE(p)		((p)->sha.type)
 #define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
 #define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
 
+/* HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#define MD5_DIGEST_SIZE         16
+#define MD5_BLOCK_SIZE          64
+#endif
+
+/* SHA */
+#define SHA_COMMON_DIGEST_SIZE	32
+#define SHA1_DIGEST_SIZE        20
+#define SHA1_BLOCK_SIZE         64
+
+/* SHA LSB initialization values */
+
+#define SHA1_H0		0x67452301UL
+#define SHA1_H1		0xefcdab89UL
+#define SHA1_H2		0x98badcfeUL
+#define SHA1_H3		0x10325476UL
+#define SHA1_H4		0xc3d2e1f0UL
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 24f577a..6adef1c 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,48 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{	/* SHA1 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v4 15/20] crypto/ccp: support sha2 family authentication algo
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (12 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 14/20] crypto/ccp: support sha1 authentication algo Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 16/20] crypto/ccp: support sha3 " Ravi Kumar
                         ` (6 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 270 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  48 +++++++
 drivers/crypto/ccp/ccp_pmd_ops.c | 168 ++++++++++++++++++++++++
 3 files changed, 486 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index c575dc1..410e8bf 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -61,6 +61,34 @@ static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	0x0U, 0x0U,
 };
 
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA224_H7, SHA224_H6,
+	SHA224_H5, SHA224_H4,
+	SHA224_H3, SHA224_H2,
+	SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA256_H7, SHA256_H6,
+	SHA256_H5, SHA256_H4,
+	SHA256_H3, SHA256_H2,
+	SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA384_H7, SHA384_H6,
+	SHA384_H5, SHA384_H4,
+	SHA384_H3, SHA384_H2,
+	SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA512_H7, SHA512_H6,
+	SHA512_H5, SHA512_H4,
+	SHA512_H3, SHA512_H2,
+	SHA512_H1, SHA512_H0,
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -97,6 +125,54 @@ static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA224_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA256_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA384_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA512_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
 static int generate_partial_hash(struct ccp_session *sess)
 {
 
@@ -104,11 +180,13 @@ static int generate_partial_hash(struct ccp_session *sess)
 	uint8_t	opad[sess->auth.block_size];
 	uint8_t *ipad_t, *opad_t;
 	uint32_t *hash_value_be32, hash_temp32[8];
+	uint64_t *hash_value_be64, hash_temp64[8];
 	int i, count;
 
 	opad_t = ipad_t = (uint8_t *)sess->auth.key;
 
 	hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+	hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
 
 	/* the key is zero-padded to the block size, so XOR across the full block */
 	for (i = 0; i < sess->auth.block_size; i++) {
@@ -132,6 +210,66 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
 	default:
 		CCP_LOG_ERR("Invalid auth algo");
 		return -1;
@@ -347,6 +485,107 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx = (void *)ccp_sha224_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx = (void *)ccp_sha256_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx = (void *)ccp_sha384_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx = (void *)ccp_sha512_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -541,12 +780,32 @@ ccp_auth_slot(struct ccp_session *session)
 
 	switch (session->auth.algo) {
 	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
 		count = 3;
 		/**< op + LSB passthrough copy to/from */
 		break;
 	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
 		count = 6;
 		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = 7;
+		/**
+		 * 1. Load PHash1 = H(k ^ ipad) to LSB
+		 * 2. Generate IHash = H(message with PHash1
+		 *    as init values)
+		 * 3. Retrieve IHash (2 slots for 384/512)
+		 * 4. Load PHash2 = H(k ^ opad) to LSB
+		 * 5. Generate FHash = H(IHash with PHash2
+		 *    as init values)
+		 * 6. Retrieve HMAC output from LSB to host memory
+		 */
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -1503,13 +1762,24 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 
 	switch (session->auth.algo) {
 	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
 		result = ccp_perform_sha(op, cmd_q);
 		b_info->desccnt += 3;
 		break;
 	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 6;
 		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 7;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
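
Note the ordering convention in the ccp_sha*_init tables near the top of
this patch: the FIPS 180-4 constants are listed H7 first, because the
context is loaded into the LSB as a single big-endian value, so the most
significant word must come first in memory. A small self-check against the
canonical SHA-256 initial hash value (hypothetical function, illustration
only):

#include <assert.h>
#include <stdint.h>

/* canonical FIPS 180-4 SHA-256 initial hash value, H0..H7 */
static const uint32_t sha256_iv[8] = {
	0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
	0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
};

static void
check_ccp_sha256_order(const uint32_t ccp_init[8])
{
	int i;

	for (i = 0; i < 8; i++)
		assert(ccp_init[i] == sha256_iv[7 - i]);	/* reversed */
}
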
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 42179de..ca1c1a8 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -78,6 +78,18 @@
 #define SHA1_DIGEST_SIZE        20
 #define SHA1_BLOCK_SIZE         64
 
+#define SHA224_DIGEST_SIZE      28
+#define SHA224_BLOCK_SIZE       64
+
+#define SHA256_DIGEST_SIZE      32
+#define SHA256_BLOCK_SIZE       64
+
+#define SHA384_DIGEST_SIZE      48
+#define SHA384_BLOCK_SIZE       128
+
+#define SHA512_DIGEST_SIZE      64
+#define SHA512_BLOCK_SIZE       128
+
 /* SHA LSB initialization values */
 
 #define SHA1_H0		0x67452301UL
@@ -86,6 +98,42 @@
 #define SHA1_H3		0x10325476UL
 #define SHA1_H4		0xc3d2e1f0UL
 
+#define SHA224_H0	0xc1059ed8UL
+#define SHA224_H1	0x367cd507UL
+#define SHA224_H2	0x3070dd17UL
+#define SHA224_H3	0xf70e5939UL
+#define SHA224_H4	0xffc00b31UL
+#define SHA224_H5	0x68581511UL
+#define SHA224_H6	0x64f98fa7UL
+#define SHA224_H7	0xbefa4fa4UL
+
+#define SHA256_H0	0x6a09e667UL
+#define SHA256_H1	0xbb67ae85UL
+#define SHA256_H2	0x3c6ef372UL
+#define SHA256_H3	0xa54ff53aUL
+#define SHA256_H4	0x510e527fUL
+#define SHA256_H5	0x9b05688cUL
+#define SHA256_H6	0x1f83d9abUL
+#define SHA256_H7	0x5be0cd19UL
+
+#define SHA384_H0	0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1	0x629a292a367cd507ULL
+#define SHA384_H2	0x9159015a3070dd17ULL
+#define SHA384_H3	0x152fecd8f70e5939ULL
+#define SHA384_H4	0x67332667ffc00b31ULL
+#define SHA384_H5	0x8eb44a8768581511ULL
+#define SHA384_H6	0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7	0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0	0x6a09e667f3bcc908ULL
+#define SHA512_H1	0xbb67ae8584caa73bULL
+#define SHA512_H2	0x3c6ef372fe94f82bULL
+#define SHA512_H3	0xa54ff53a5f1d36f1ULL
+#define SHA512_H4	0x510e527fade682d1ULL
+#define SHA512_H5	0x9b05688c2b3e6c1fULL
+#define SHA512_H6	0x1f83d9abfb41bd6bULL
+#define SHA512_H7	0x5be0cd19137e2179ULL
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 6adef1c..ab6199f 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -81,6 +81,174 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
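
[Aside] The HMAC pre-compute introduced in this patch relies on a
standard property of Merkle-Damgard hashes: the digest state reached
after absorbing the one-block pad k ^ ipad can be saved and the hash
resumed from it, which is what loading the partial hash into the CCP
LSB achieves. A minimal host-side sketch of that identity, assuming
OpenSSL's libcrypto is available (the key and message strings are made
up for illustration; build with -lcrypto):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/sha.h>

#define BLK 64	/* SHA-256 block size, as in the patch */

int main(void)
{
	uint8_t key[BLK] = "session-key";  /* zero-padded, like sess->auth.key */
	uint8_t ipad[BLK], opad[BLK];
	uint8_t inner[SHA256_DIGEST_LENGTH];
	uint8_t outer_in[BLK + SHA256_DIGEST_LENGTH];
	uint8_t ours[SHA256_DIGEST_LENGTH], ref[SHA256_DIGEST_LENGTH];
	const uint8_t msg[] = "payload";
	unsigned int ref_len = 0;
	SHA256_CTX c;
	int i;

	for (i = 0; i < BLK; i++) {
		ipad[i] = key[i] ^ 0x36;
		opad[i] = key[i] ^ 0x5c;
	}

	/* inner = H(k ^ ipad || msg); the CCP reaches the same state by
	 * preloading the LSB with the partial hash of k ^ ipad. */
	SHA256_Init(&c);
	SHA256_Update(&c, ipad, BLK);
	SHA256_Update(&c, msg, sizeof(msg) - 1);
	SHA256_Final(inner, &c);

	/* outer = H(k ^ opad || inner) */
	memcpy(outer_in, opad, BLK);
	memcpy(outer_in + BLK, inner, sizeof(inner));
	SHA256(outer_in, sizeof(outer_in), ours);

	HMAC(EVP_sha256(), key, BLK, msg, sizeof(msg) - 1, ref, &ref_len);
	printf("identity holds: %s\n",
	       memcmp(ours, ref, ref_len) == 0 ? "yes" : "no");
	return 0;
}

generate_partial_hash() in the patch stores exactly these two
intermediate states, H(k ^ ipad) and H(k ^ opad), in
sess->auth.pre_compute, byte-swapped into the layout the engine expects.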

* [PATCH v4 16/20] crypto/ccp: support sha3 family authentication algo
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (13 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 15/20] crypto/ccp: support sha2 family " Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 17/20] crypto/ccp: support cpu based md5 and sha2 " Ravi Kumar
                         ` (5 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c       | 667 +++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h       |  22 ++
 drivers/crypto/ccp/ccp_pmd_ops.c      | 168 +++++++++
 lib/librte_cryptodev/rte_crypto_sym.h |  17 +
 4 files changed, 873 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 410e8bf..cb63bc6 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -89,6 +89,74 @@ uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
 	SHA512_H1, SHA512_H0,
 };
 
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+	(((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+	uint64_t saved;
+	/**
+	 * The portion of the input message that we
+	 * didn't consume yet
+	 */
+	union {
+		uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+		/* Keccak's state */
+		uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+		/**total 200 ctx size**/
+	};
+	unsigned int byteIndex;
+	/**
+	 * 0..7--the next byte after the set one
+	 * (starts from 0; 0--none are buffered)
+	 */
+	unsigned int wordIndex;
+	/**
+	 * 0..24--the next word to integrate input
+	 * (starts from 0)
+	 */
+	unsigned int capacityWords;
+	/**
+	 * twice the hash output size in words
+	 * (e.g. 16 for SHA3-512)
+	 */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+	(((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+	SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+	SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+	SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+	SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+	SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+	SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+	SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+	SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+	SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+	SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+	1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+	18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+	10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+	14, 22, 9, 6, 1
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -173,6 +241,223 @@ static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
+static void
+keccakf(uint64_t s[25])
+{
+	int i, j, round;
+	uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+	for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+		/* Theta */
+		for (i = 0; i < 5; i++)
+			bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+				s[i + 20];
+
+		for (i = 0; i < 5; i++) {
+			t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+			for (j = 0; j < 25; j += 5)
+				s[j + i] ^= t;
+		}
+
+		/* Rho Pi */
+		t = s[1];
+		for (i = 0; i < 24; i++) {
+			j = keccakf_piln[i];
+			bc[0] = s[j];
+			s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+			t = bc[0];
+		}
+
+		/* Chi */
+		for (j = 0; j < 25; j += 5) {
+			for (i = 0; i < 5; i++)
+				bc[i] = s[j + i];
+			for (i = 0; i < 5; i++)
+				s[j + i] ^= (~bc[(i + 1) % 5]) &
+					    bc[(i + 2) % 5];
+		}
+
+		/* Iota */
+		s[0] ^= keccakf_rndc[round];
+	}
+}
+
+static void
+sha3_Init224(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+
+/* Absorb message bytes into the sponge. Partial 64-bit words are
+ * buffered in ctx->saved; each complete word is XORed into the state
+ * and keccakf() runs once a full rate block has been absorbed. The
+ * 0x01 || 0x00* || 0x80 padding (first and last byte may coincide)
+ * is not applied here.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+	unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+	size_t words;
+	unsigned int tail;
+	size_t i;
+	const uint8_t *buf = bufIn;
+
+	if (len < old_tail) {
+		while (len--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+		return;
+	}
+
+	if (old_tail) {
+		len -= old_tail;
+		while (old_tail--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+
+		ctx->s[ctx->wordIndex] ^= ctx->saved;
+		ctx->byteIndex = 0;
+		ctx->saved = 0;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	words = len / sizeof(uint64_t);
+	tail = len - words * sizeof(uint64_t);
+
+	for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+		const uint64_t t = (uint64_t) (buf[0]) |
+			((uint64_t) (buf[1]) << 8 * 1) |
+			((uint64_t) (buf[2]) << 8 * 2) |
+			((uint64_t) (buf[3]) << 8 * 3) |
+			((uint64_t) (buf[4]) << 8 * 4) |
+			((uint64_t) (buf[5]) << 8 * 5) |
+			((uint64_t) (buf[6]) << 8 * 6) |
+			((uint64_t) (buf[7]) << 8 * 7);
+		ctx->s[ctx->wordIndex] ^= t;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	while (tail--)
+		ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
+
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init224(ctx);
+	sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init256(ctx);
+	sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init384(ctx);
+	sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init512(ctx);
+	sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
 static int generate_partial_hash(struct ccp_session *sess)
 {
 
@@ -182,6 +467,7 @@ static int generate_partial_hash(struct ccp_session *sess)
 	uint32_t *hash_value_be32, hash_temp32[8];
 	uint64_t *hash_value_be64, hash_temp64[8];
 	int i, count;
+	uint8_t *hash_value_sha3;
 
 	opad_t = ipad_t = (uint8_t *)sess->auth.key;
 
@@ -225,6 +511,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_224(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha3_224(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA256_HMAC:
 		count = SHA256_DIGEST_SIZE >> 2;
 
@@ -240,6 +536,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_256(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_256(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 		count = SHA512_DIGEST_SIZE >> 3;
 
@@ -255,6 +561,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be64++)
 			*hash_value_be64 = hash_temp64[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_384(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_384(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA512_HMAC:
 		count = SHA512_DIGEST_SIZE >> 3;
 
@@ -270,6 +586,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be64++)
 			*hash_value_be64 = hash_temp64[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_512(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_512(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	default:
 		CCP_LOG_ERR("Invalid auth algo");
 		return -1;
@@ -510,6 +836,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+		if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA256:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -535,6 +885,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+		if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA384:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -560,6 +934,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+		if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA512:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -585,7 +983,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
-
+	case RTE_CRYPTO_AUTH_SHA3_512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+		if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -806,6 +1227,26 @@ ccp_auth_slot(struct ccp_session *session)
 		 * 6. Retrieve HMAC output from LSB to host memory
 		 */
 		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		count = 1;
+		/**< single op; ctx and dst are in host memory */
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		count = 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		count = 4;
+		/**
+		 * 1. Op to perform IHash
+		 * 2. Retrieve IHash from LSB to host memory
+		 *    (two passthrough copies for 384/512)
+		 * 3. Op to perform final hash
+		 */
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -1196,6 +1637,213 @@ ccp_perform_sha(struct rte_crypto_op *op,
 }
 
 static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+		      struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	struct ccp_passthru pst;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+						   *)session->auth.pre_compute);
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* desc1 for SHA3 IHash operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+	CCP_CMD_DST_HI(desc) = 0;
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Intermediate Hash value retrieve */
+	if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	/* SHA engine command descriptor for FinalHash */
+	ctx_paddr += CCP_SHA3_CTX_SIZE;
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+		dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+		CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+		dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+	} else {
+		CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+	}
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *ctx_addr, *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	ctx_addr = session->auth.sha3_ctx;
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for SHA3 operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
 ccp_perform_aes_cmac(struct rte_crypto_op *op,
 		     struct ccp_queue *cmd_q)
 {
@@ -1780,6 +2428,23 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 7;
 		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		result = ccp_perform_sha3(op, cmd_q);
+		b_info->desccnt += 1;
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
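
[Aside] The two SHA-engine descriptors built by ccp_perform_sha3_hmac()
above evaluate plain HMAC with the SHA-3 rate as the block size
(136 bytes for SHA3-256, matching the capability table in this patch).
A host-side sketch of that construction, assuming OpenSSL 1.1.1+ for
EVP_sha3_256() (key/message contents are made up; build with -lcrypto):

#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

#define RATE 136	/* SHA3_256_BLOCK_SIZE in the patch */

int main(void)
{
	unsigned char key[RATE] = "session-key";	/* zero-padded */
	unsigned char ipad[RATE], opad[RATE];
	unsigned char inner[32], buf[RATE + 32];
	unsigned char ours[32], ref[EVP_MAX_MD_SIZE];
	const unsigned char msg[] = "payload";
	unsigned int len = 0, i;

	for (i = 0; i < RATE; i++) {
		ipad[i] = key[i] ^ 0x36;
		opad[i] = key[i] ^ 0x5c;
	}

	/* inner hash: H(k ^ ipad || msg), the "IHash" descriptor */
	memcpy(buf, ipad, RATE);
	memcpy(buf + RATE, msg, sizeof(msg) - 1);
	EVP_Digest(buf, RATE + sizeof(msg) - 1, inner, &len,
		   EVP_sha3_256(), NULL);

	/* final hash: H(k ^ opad || inner), the "FinalHash" descriptor */
	memcpy(buf, opad, RATE);
	memcpy(buf + RATE, inner, sizeof(inner));
	EVP_Digest(buf, RATE + sizeof(inner), ours, &len,
		   EVP_sha3_256(), NULL);

	HMAC(EVP_sha3_256(), key, RATE, msg, sizeof(msg) - 1, ref, &len);
	printf("match: %s\n",
	       memcmp(ours, ref, sizeof(ours)) == 0 ? "yes" : "no");
	return 0;
}

The driver differs only in that the states after absorbing k ^ ipad and
k ^ opad are pre-computed once per session (the pre_compute buffer) and
handed to the engine as initial context, rather than re-hashed per op.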
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index ca1c1a8..8459b71 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -80,15 +80,19 @@
 
 #define SHA224_DIGEST_SIZE      28
 #define SHA224_BLOCK_SIZE       64
+#define SHA3_224_BLOCK_SIZE     144
 
 #define SHA256_DIGEST_SIZE      32
 #define SHA256_BLOCK_SIZE       64
+#define SHA3_256_BLOCK_SIZE     136
 
 #define SHA384_DIGEST_SIZE      48
 #define SHA384_BLOCK_SIZE       128
+#define SHA3_384_BLOCK_SIZE	104
 
 #define SHA512_DIGEST_SIZE      64
 #define SHA512_BLOCK_SIZE       128
+#define SHA3_512_BLOCK_SIZE     72
 
 /* SHA LSB initialization values */
 
@@ -386,4 +390,22 @@ int process_ops_to_dequeue(struct ccp_qp *qp,
 			   struct rte_crypto_op **op,
 			   uint16_t nb_ops);
 
+
+/**
+ * APIs for SHA3 partial hash generation
+ * @param data_in buffer over which the partial hash is computed
+ * @param data_out buffer where the partial hash result is written,
+ * in CCP big-endian format
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+			  uint8_t *data_out);
+
 #endif /* _CCP_CRYPTO_H_ */
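
[Aside] The SHA3_*_BLOCK_SIZE values added to this header are the
Keccak sponge rate: the 200-byte (1600-bit) state minus twice the
digest length (the capacity). A quick check:

#include <stdio.h>

int main(void)
{
	/* digest sizes for SHA3-224/256/384/512, in bytes */
	const int digest[] = { 28, 32, 48, 64 };
	int i;

	for (i = 0; i < 4; i++)
		printf("SHA3-%d block size = %d\n",
		       digest[i] * 8, 200 - 2 * digest[i]);
	/* prints 144, 136, 104, 72 - the values #defined above */
	return 0;
}

The same relation drives the sha3_Init*() helpers in ccp_crypto.c,
which set capacityWords to 2 * output-bits / 64.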
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index ab6199f..bb59d15 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -123,6 +123,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 1,
+					 .max = 144,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA256 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -165,6 +207,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-256-HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 1,
+					 .max = 136,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA384 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -207,6 +291,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-384-HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 1,
+					 .max = 104,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA512  */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -249,6 +375,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-512  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-512-HMAC  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 1,
+					 .max = 72,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
 	{	/* AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 60797e9..eb5afc5 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -245,6 +245,23 @@ enum rte_crypto_auth_algorithm {
 	RTE_CRYPTO_AUTH_ZUC_EIA3,
 	/**< ZUC algorithm in EIA3 mode */
 
+	RTE_CRYPTO_AUTH_SHA3_224,
+	/**< 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	/**< HMAC using 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256,
+	/**< 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	/**< HMAC using 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384,
+	/**< 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	/**< HMAC using 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512,
+	/**< 512 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+	/**< HMAC using 512 bit SHA3 algorithm. */
+
 	RTE_CRYPTO_AUTH_LIST_END
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
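
[Aside] With the new RTE_CRYPTO_AUTH_SHA3_* values in rte_crypto_sym.h,
an application selects SHA3 like any other auth algorithm. A minimal
sketch against the DPDK symmetric-crypto API of this era (device and
session setup elided; the digest length matches the capability table):

#include <string.h>
#include <rte_crypto_sym.h>

/* Build an auth-only transform for plain (keyless) SHA3-256. */
static struct rte_crypto_sym_xform
sha3_256_auth_xform(void)
{
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform.next = NULL;
	xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform.auth.algo = RTE_CRYPTO_AUTH_SHA3_256;
	xform.auth.digest_length = 32;	/* SHA3-256 digest, in bytes */
	/* plain hash: no key (key_size min/max are 0 in the PMD caps) */
	return xform;
}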

* [PATCH v4 17/20] crypto/ccp: support cpu based md5 and sha2 family authentication algo
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (14 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 16/20] crypto/ccp: support sha3 " Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 18/20] test/crypto: add test for AMD CCP crypto poll mode Ravi Kumar
                         ` (4 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 config/common_base                   |   1 +
 drivers/crypto/ccp/ccp_crypto.c      | 282 ++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h      |   5 +-
 drivers/crypto/ccp/ccp_pmd_ops.c     |  23 +++
 drivers/crypto/ccp/ccp_pmd_private.h |  10 ++
 5 files changed, 316 insertions(+), 5 deletions(-)

diff --git a/config/common_base b/config/common_base
index 28237f0..65e34ae 100644
--- a/config/common_base
+++ b/config/common_base
@@ -532,6 +532,7 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 # Compile PMD for AMD CCP crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_CCP=n
+CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
 
 #
 # Compile PMD for Marvell Crypto device
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index cb63bc6..133db76 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -53,6 +53,12 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+#endif
+
 /* SHA initial context values */
 static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	SHA1_H4, SHA1_H3,
@@ -786,6 +792,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - MD5_DIGEST_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		sess->auth.block_size = MD5_BLOCK_SIZE;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+#endif
 	case RTE_CRYPTO_AUTH_SHA1:
 		sess->auth.engine = CCP_ENGINE_SHA;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
@@ -795,6 +812,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
 			return -1;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -810,6 +838,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA224:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
@@ -820,6 +849,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
@@ -835,6 +875,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_224:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
@@ -869,6 +910,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
@@ -884,6 +936,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_256:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
@@ -918,6 +971,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
@@ -933,6 +997,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_384:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
@@ -967,6 +1032,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
@@ -982,6 +1058,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_512:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
@@ -1012,7 +1089,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.engine = CCP_ENGINE_AES;
 		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
 		sess->auth.key_length = auth_xform->key.length;
-		/**<padding and hash result*/
+		/* padding and hash result */
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = AES_BLOCK_SIZE;
 		sess->auth.block_size = AES_BLOCK_SIZE;
@@ -1208,14 +1285,22 @@ ccp_auth_slot(struct ccp_session *session)
 		count = 3;
 		/**< op + LSB passthrough copy to/from */
 		break;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		break;
+#endif
 	case CCP_AUTH_ALGO_SHA1_HMAC:
 	case CCP_AUTH_ALGO_SHA224_HMAC:
 	case CCP_AUTH_ALGO_SHA256_HMAC:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 		count = 6;
+#endif
 		break;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 	case CCP_AUTH_ALGO_SHA512_HMAC:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 		count = 7;
+#endif
 		/**
 		 * 1. Load PHash1 = H(k ^ ipad) to LSB
 		 * 2. Generate IHash = H(message with PHash1
@@ -1322,6 +1407,122 @@ ccp_compute_slot_count(struct ccp_session *session)
 	return count;
 }
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+static uint8_t
+algo_select(int sessalgo,
+	    const EVP_MD **algo)
+{
+	int res = 0;
+
+	switch (sessalgo) {
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		*algo = EVP_md5();
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		*algo = EVP_sha1();
+		break;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		*algo = EVP_sha224();
+		break;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		*algo = EVP_sha256();
+		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		*algo = EVP_sha384();
+		break;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		*algo = EVP_sha512();
+		break;
+	default:
+		res = -EINVAL;
+		break;
+	}
+	return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+		      __rte_unused uint8_t *iv,
+		      EVP_PKEY *pkey,
+		      int srclen,
+		      EVP_MD_CTX *ctx,
+		      const EVP_MD *algo,
+		      uint16_t d_len)
+{
+	size_t dstlen;
+	unsigned char temp_dst[64];
+
+	if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+		goto process_auth_err;
+
+	memcpy(dst, temp_dst, d_len);
+	return 0;
+process_auth_err:
+	CCP_LOG_ERR("Process cpu auth failed");
+	return -EINVAL;
+}
+
+static int cpu_crypto_auth(struct ccp_qp *qp,
+			   struct rte_crypto_op *op,
+			   struct ccp_session *sess,
+			   EVP_MD_CTX *ctx)
+{
+	uint8_t *src, *dst;
+	int srclen, status;
+	struct rte_mbuf *mbuf_src, *mbuf_dst;
+	const EVP_MD *algo = NULL;
+	EVP_PKEY *pkey;
+
+	algo_select(sess->auth.algo, &algo);
+	pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+				    sess->auth.key_length);
+	mbuf_src = op->sym->m_src;
+	mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+	srclen = op->sym->auth.data.length;
+	src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+				      op->sym->auth.data.offset);
+
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+		dst = qp->temp_digest;
+	} else {
+		dst = op->sym->auth.digest.data;
+		if (dst == NULL) {
+			dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+						     op->sym->auth.data.offset +
+						     sess->auth.digest_length);
+		}
+	}
+	status = process_cpu_auth_hmac(src, dst, NULL,
+				       pkey, srclen,
+				       ctx,
+				       algo,
+				       sess->auth.digest_length);
+	if (status) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return status;
+	}
+
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(dst, op->sym->auth.digest.data,
+			   sess->auth.digest_length) != 0) {
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+		} else {
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+	} else {
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	}
+	EVP_PKEY_free(pkey);
+	return 0;
+}
+#endif
+
 static void
 ccp_perform_passthru(struct ccp_passthru *pst,
 		     struct ccp_queue *cmd_q)
@@ -2417,14 +2618,24 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 		result = ccp_perform_sha(op, cmd_q);
 		b_info->desccnt += 3;
 		break;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		break;
+#endif
 	case CCP_AUTH_ALGO_SHA1_HMAC:
 	case CCP_AUTH_ALGO_SHA224_HMAC:
 	case CCP_AUTH_ALGO_SHA256_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		break;
+#endif
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 6;
 		break;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 	case CCP_AUTH_ALGO_SHA512_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		break;
+#endif
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 7;
 		break;
@@ -2488,7 +2699,7 @@ ccp_crypto_aead(struct rte_crypto_op *op,
 }
 
 int
-process_ops_to_enqueue(const struct ccp_qp *qp,
+process_ops_to_enqueue(struct ccp_qp *qp,
 		       struct rte_crypto_op **op,
 		       struct ccp_queue *cmd_q,
 		       uint16_t nb_ops,
@@ -2497,11 +2708,22 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 	int i, result = 0;
 	struct ccp_batch_info *b_info;
 	struct ccp_session *session;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+#endif
 
 	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
 		CCP_LOG_ERR("batch info allocation failed");
 		return 0;
 	}
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		return 0;
+	}
+	b_info->auth_ctr = 0;
+#endif
 	/* populate batch info necessary for dequeue */
 	b_info->op_idx = 0;
 	b_info->lsb_buf_idx = 0;
@@ -2523,6 +2745,11 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 			break;
 		case CCP_CMD_AUTH:
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			b_info->auth_ctr++;
+			result = cpu_crypto_auth(qp, op[i],
+						 session, auth_ctx);
+#endif
 			break;
 		case CCP_CMD_CIPHER_HASH:
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -2532,6 +2759,12 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 			break;
 		case CCP_CMD_HASH_CIPHER:
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			result = cpu_crypto_auth(qp, op[i],
+						 session, auth_ctx);
+			if (op[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+				continue;
+#endif
 			if (result)
 				break;
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -2565,6 +2798,9 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 
 	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
 	return i;
 }
 
@@ -2633,13 +2869,27 @@ static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
 }
 
 static int
-ccp_prepare_ops(struct rte_crypto_op **op_d,
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ccp_prepare_ops(struct ccp_qp *qp,
+#else
+ccp_prepare_ops(struct ccp_qp *qp __rte_unused,
+#endif
+		struct rte_crypto_op **op_d,
 		struct ccp_batch_info *b_info,
 		uint16_t nb_ops)
 {
 	int i, min_ops;
 	struct ccp_session *session;
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		return 0;
+	}
+#endif
 	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
 
 	for (i = 0; i < min_ops; i++) {
@@ -2652,8 +2902,25 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 			break;
 		case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_CIPHER_HASH:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			cpu_crypto_auth(qp, op_d[i],
+					session, auth_ctx);
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_HASH_CIPHER:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_COMBINED:
 			ccp_auth_dq_prepare(op_d[i]);
 			break;
@@ -2662,6 +2929,9 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 		}
 	}
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
 	b_info->opcnt -= min_ops;
 	return min_ops;
 }
@@ -2681,6 +2951,10 @@ process_ops_to_dequeue(struct ccp_qp *qp,
 	} else if (rte_ring_dequeue(qp->processed_pkts,
 				    (void **)&b_info))
 		return 0;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	if (b_info->auth_ctr == b_info->opcnt)
+		goto success;
+#endif
 	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
 				       CMD_Q_HEAD_LO_BASE);
 
@@ -2700,7 +2974,7 @@ process_ops_to_dequeue(struct ccp_qp *qp,
 
 
 success:
-	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
+	nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
 	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
 	b_info->desccnt = 0;
 	if (b_info->opcnt > 0) {
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 8459b71..f526329 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -94,6 +94,9 @@
 #define SHA512_BLOCK_SIZE       128
 #define SHA3_512_BLOCK_SIZE     72
 
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX	64
+
 /* SHA LSB initialization values */
 
 #define SHA1_H0		0x67452301UL
@@ -372,7 +375,7 @@ int ccp_compute_slot_count(struct ccp_session *session);
  * @param nb_ops No. of ops to be submitted
  * @return 0 on success otherwise -1
  */
-int process_ops_to_enqueue(const struct ccp_qp *qp,
+int process_ops_to_enqueue(struct ccp_qp *qp,
 			   struct rte_crypto_op **op,
 			   struct ccp_queue *cmd_q,
 			   uint16_t nb_ops,
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index bb59d15..1b67070 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -39,6 +39,29 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
+#endif
 	{	/* SHA1 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
index d278a8c..cd9f6ae 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -32,6 +32,7 @@
 #define _CCP_PMD_PRIVATE_H_
 
 #include <rte_cryptodev.h>
+#include "ccp_crypto.h"
 
 #define CRYPTODEV_NAME_CCP_PMD crypto_ccp
 
@@ -87,6 +88,10 @@ struct ccp_batch_info {
 	phys_addr_t lsb_buf_phys;
 	/**< LSB intermediate buf for passthru */
 	int lsb_buf_idx;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	uint16_t auth_ctr;
+	/**< auth only ops batch */
+#endif
 } __rte_cache_aligned;
 
 /**< CCP crypto queue pair */
@@ -107,6 +112,11 @@ struct ccp_qp {
 	/**< Store ops pulled out of queue */
 	struct rte_cryptodev *dev;
 	/**< rte crypto device to which this qp belongs */
+	uint8_t temp_digest[DIGEST_LENGTH_MAX];
+	/**< Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
 } __rte_cache_aligned;
 
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
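
[Aside] The process_cpu_auth_hmac() helper in this patch is the stock
OpenSSL EVP_DigestSign* HMAC sequence. A standalone equivalent,
assuming libcrypto (key/message are made up; build with -lcrypto):

#include <stdio.h>
#include <openssl/evp.h>

int main(void)
{
	const unsigned char key[] = "hmac-key";
	const unsigned char msg[] = "payload";
	unsigned char mac[EVP_MAX_MD_SIZE];
	size_t mac_len = sizeof(mac);
	EVP_PKEY *pkey;
	EVP_MD_CTX *ctx;

	pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, key,
				    sizeof(key) - 1);
	ctx = EVP_MD_CTX_create();
	if (pkey == NULL || ctx == NULL)
		return 1;

	if (EVP_DigestSignInit(ctx, NULL, EVP_sha256(), NULL, pkey) <= 0 ||
	    EVP_DigestSignUpdate(ctx, msg, sizeof(msg) - 1) <= 0 ||
	    EVP_DigestSignFinal(ctx, mac, &mac_len) <= 0) {
		fprintf(stderr, "HMAC computation failed\n");
		return 1;
	}
	printf("HMAC-SHA256 digest is %zu bytes\n", mac_len);

	EVP_MD_CTX_destroy(ctx);
	EVP_PKEY_free(pkey);
	return 0;
}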

* [PATCH v4 18/20] test/crypto: add test for AMD CCP crypto poll mode
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (15 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 17/20] crypto/ccp: support cpu based md5 and sha2 " Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 19/20] doc: add document for AMD CCP crypto poll mode driver Ravi Kumar
                         ` (3 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 test/test/test_cryptodev.c                   | 161 +++++++++++++++++++++++++++
 test/test/test_cryptodev.h                   |   1 +
 test/test/test_cryptodev_aes_test_vectors.h  |  93 ++++++++++------
 test/test/test_cryptodev_blockcipher.c       |   9 +-
 test/test/test_cryptodev_blockcipher.h       |   1 +
 test/test/test_cryptodev_des_test_vectors.h  |  42 ++++---
 test/test/test_cryptodev_hash_test_vectors.h |  60 ++++++----
 7 files changed, 301 insertions(+), 66 deletions(-)

diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 1417482..d1d7925 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -338,6 +338,23 @@ testsuite_setup(void)
 		}
 	}
 
+	/* Create a CCP device if required */
+	if (gbl_driver_id == rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD))) {
+		nb_devs = rte_cryptodev_device_count_by_driver(
+				rte_cryptodev_driver_id_get(
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD)));
+		if (nb_devs < 1) {
+			ret = rte_vdev_init(
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD),
+				NULL);
+
+			TEST_ASSERT(ret == 0, "Failed to create "
+				"instance of pmd : %s",
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+		}
+	}
+
 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
 	if (gbl_driver_id == rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
@@ -1727,6 +1744,44 @@ test_AES_cipheronly_openssl_all(void)
 }
 
 static int
+test_AES_chain_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AES_CHAIN_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_AES_cipheronly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_qat_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1898,6 +1953,25 @@ test_authonly_openssl_all(void)
 }
 
 static int
+test_authonly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AUTHONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_armv8_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -4973,6 +5047,44 @@ test_3DES_cipheronly_dpaa2_sec_all(void)
 }
 
 static int
+test_3DES_chain_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_3DES_CHAIN_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_3DES_cipheronly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_3DES_cipheronly_qat_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -9646,6 +9758,38 @@ static struct unit_test_suite cryptodev_mrvl_testsuite  = {
 	}
 };
 
+static struct unit_test_suite cryptodev_ccp_testsuite  = {
+	.suite_name = "Crypto Device CCP Unit Test Suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		TEST_CASE_ST(ut_setup, ut_teardown, test_multi_session),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_multi_session_random_usage),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_AES_chain_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_AES_cipheronly_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_3DES_chain_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_3DES_cipheronly_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_authonly_ccp_all),
+
+		/** Negative tests */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			authentication_verify_HMAC_SHA1_fail_data_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			authentication_verify_HMAC_SHA1_fail_tag_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			auth_decryption_AES128CBC_HMAC_SHA1_fail_data_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
 
 static int
 test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
@@ -9867,6 +10011,22 @@ test_cryptodev_dpaa_sec(void /*argv __rte_unused, int argc __rte_unused*/)
 	return unit_test_suite_runner(&cryptodev_dpaa_sec_testsuite);
 }
 
+static int
+test_cryptodev_ccp(void)
+{
+	gbl_driver_id = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+
+	if (gbl_driver_id == -1) {
+		RTE_LOG(ERR, USER1, "CCP PMD must be loaded. Check if "
+				"CONFIG_RTE_LIBRTE_PMD_CCP is enabled "
+				"in config file to run this testsuite.\n");
+		return TEST_FAILED;
+	}
+
+	return unit_test_suite_runner(&cryptodev_ccp_testsuite);
+}
+
 REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
 REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
 REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
@@ -9879,3 +10039,4 @@ REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8);
 REGISTER_TEST_COMMAND(cryptodev_sw_mrvl_autotest, test_cryptodev_mrvl);
 REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec);
 REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec);
+REGISTER_TEST_COMMAND(cryptodev_ccp_autotest, test_cryptodev_ccp);
diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
index 8cdc087..d45fb7b 100644
--- a/test/test/test_cryptodev.h
+++ b/test/test/test_cryptodev.h
@@ -61,6 +61,7 @@
 #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
 #define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
 #define CRYPTODEV_NAME_MRVL_PMD		crypto_mrvl
+#define CRYPTODEV_NAME_CCP_PMD		crypto_ccp
 
 /**
  * Write (spread) data from buffer to mbuf data
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 3577ef4..6f2422a 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1171,7 +1171,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1184,7 +1185,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1223,7 +1225,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1236,7 +1239,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1249,7 +1253,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1285,7 +1290,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1315,7 +1321,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
@@ -1337,7 +1344,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1357,7 +1365,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1366,7 +1375,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1390,7 +1400,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1455,7 +1466,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1467,7 +1479,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1479,7 +1492,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1492,7 +1506,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1501,7 +1516,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr =
@@ -1511,7 +1527,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
@@ -1526,7 +1543,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC Decryption",
@@ -1538,7 +1556,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption",
@@ -1549,7 +1568,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption Scatter Gather",
@@ -1570,7 +1590,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Decryption Scatter Gather",
@@ -1590,7 +1611,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC Decryption",
@@ -1602,7 +1624,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Encryption",
@@ -1612,7 +1635,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Decryption",
@@ -1622,7 +1646,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Encryption",
@@ -1634,7 +1659,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Decryption",
@@ -1646,7 +1672,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR Encryption",
@@ -1657,7 +1684,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR Decryption",
@@ -1668,7 +1696,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR Encryption",
@@ -1680,7 +1709,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR Decryption",
@@ -1692,7 +1722,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Encryption (12-byte IV)",
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index ed06618..5835b3e 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -54,6 +54,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 
 	int openssl_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+	int ccp_pmd = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
 	int scheduler_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
 	int armv8_pmd = rte_cryptodev_driver_id_get(
@@ -94,7 +96,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 			driver_id == qat_pmd ||
 			driver_id == openssl_pmd ||
 			driver_id == armv8_pmd ||
-			driver_id == mrvl_pmd) { /* Fall through */
+			driver_id == mrvl_pmd ||
+			driver_id == ccp_pmd) { /* Fall through */
 		digest_len = tdata->digest.len;
 	} else if (driver_id == aesni_mb_pmd ||
 			driver_id == scheduler_pmd) {
@@ -555,6 +558,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
 
 	int openssl_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+	int ccp_pmd = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
 	int dpaa2_sec_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
 	int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
@@ -627,6 +632,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER;
 	else if (driver_id == dpaa2_sec_pmd)
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC;
+	else if (driver_id == ccp_pmd)
+		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_CCP;
 	else if (driver_id == dpaa_sec_pmd)
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC;
 	else if (driver_id == mrvl_pmd)
diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
index edbdaab..93ef0ae 100644
--- a/test/test/test_cryptodev_blockcipher.h
+++ b/test/test/test_cryptodev_blockcipher.h
@@ -27,6 +27,7 @@
 #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC	0x0020 /* DPAA2_SEC flag */
 #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC	0x0040 /* DPAA_SEC flag */
 #define BLOCKCIPHER_TEST_TARGET_PMD_MRVL	0x0080 /* Marvell flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_CCP		0x0100 /* CCP flag */
 
 #define BLOCKCIPHER_TEST_OP_CIPHER	(BLOCKCIPHER_TEST_OP_ENCRYPT | \
 					BLOCKCIPHER_TEST_OP_DECRYPT)
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index 0be809e..a30317c 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -1044,7 +1044,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1053,19 +1054,22 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
 		.test_data = &triple_des128cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Decryption Digest Verify",
 		.test_data = &triple_des128cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Encryption Digest",
@@ -1075,7 +1079,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1085,21 +1090,24 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
 		.test_data = &triple_des192cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Decryption Digest Verify",
 		.test_data = &triple_des192cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CTR HMAC-SHA1 Encryption Digest",
@@ -1180,7 +1188,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr =
@@ -1189,7 +1198,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
@@ -1201,7 +1211,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC Decryption",
@@ -1210,7 +1221,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC Encryption",
@@ -1220,7 +1232,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC Decryption",
@@ -1230,7 +1243,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 93dacb7..6b882ae 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -358,13 +358,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.test_descr = "SHA1 Digest",
 		.test_data = &sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA1 Digest Verify",
 		.test_data = &sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest",
@@ -375,7 +377,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Scatter Gather",
@@ -394,7 +397,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Verify Scatter Gather",
@@ -408,13 +412,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.test_descr = "SHA224 Digest",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			    BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA224 Digest Verify",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			    BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest",
@@ -425,7 +431,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest Verify",
@@ -436,19 +443,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA256 Digest",
 		.test_data = &sha256_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA256 Digest Verify",
 		.test_data = &sha256_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest",
@@ -459,7 +469,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest Verify",
@@ -470,19 +481,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA384 Digest",
 		.test_data = &sha384_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA384 Digest Verify",
 		.test_data = &sha384_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest",
@@ -493,7 +507,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest Verify",
@@ -504,19 +519,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA512 Digest",
 		.test_data = &sha512_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA512 Digest Verify",
 		.test_data = &sha512_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest",
@@ -527,7 +545,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest Verify",
@@ -538,7 +557,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
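
A note on the new PMD flag: test_blockcipher_all_tests() selects test cases
by ANDing each case's pmd_mask against the target PMD flag, so every
BLOCKCIPHER_TEST_TARGET_PMD_* value must occupy a distinct bit. The CCP flag
therefore takes the next free bit (0x0100) rather than reusing 0x0040, which
already belongs to DPAA_SEC. A small self-contained sketch of that selection
logic (the struct and loop are simplified stand-ins for the real code in
test_cryptodev_blockcipher.c):

	/* Why each BLOCKCIPHER_TEST_TARGET_PMD_* flag needs its own bit.
	 * Values mirror test_cryptodev_blockcipher.h; the loop is a
	 * simplified stand-in for test_blockcipher_all_tests().
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC	0x0040
	#define BLOCKCIPHER_TEST_TARGET_PMD_MRVL	0x0080
	#define BLOCKCIPHER_TEST_TARGET_PMD_CCP		0x0100 /* next free bit */

	struct test_case {
		const char *descr;
		uint32_t pmd_mask;
	};

	int main(void)
	{
		const struct test_case cases[] = {
			{ "DPAA_SEC-only case",
				BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC },
			{ "CCP-only case",
				BLOCKCIPHER_TEST_TARGET_PMD_CCP },
		};
		uint32_t target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_CCP;
		unsigned int i;

		/* Had CCP reused 0x0040, the DPAA_SEC-only case would also
		 * match here and be run against the CCP PMD by mistake. */
		for (i = 0; i < 2; i++)
			if (cases[i].pmd_mask & target_pmd_mask)
				printf("running: %s\n", cases[i].descr);
		return 0;
	}

Once DPDK is built with CONFIG_RTE_LIBRTE_PMD_CCP=y, the suite registered
above is invoked as the cryptodev_ccp_autotest command of the test binary.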

* [PATCH v4 19/20] doc: add document for AMD CCP crypto poll mode driver
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (16 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 18/20] test/crypto: add test for AMD CCP crypto poll mode Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-09  8:35       ` [PATCH v4 20/20] crypto/ccp: moved license headers to SPDX format Ravi Kumar
                         ` (2 subsequent siblings)
  20 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 doc/guides/cryptodevs/ccp.rst              | 127 +++++++++++++++++++++++++++++
 doc/guides/cryptodevs/features/ccp.ini     |  57 +++++++++++++
 doc/guides/cryptodevs/features/default.ini |  12 +++
 doc/guides/cryptodevs/index.rst            |   1 +
 4 files changed, 197 insertions(+)
 create mode 100644 doc/guides/cryptodevs/ccp.rst
 create mode 100644 doc/guides/cryptodevs/features/ccp.ini

diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
new file mode 100644
index 0000000..59b9281
--- /dev/null
+++ b/doc/guides/cryptodevs/ccp.rst
@@ -0,0 +1,127 @@
+.. Copyright(c) 2017 Advanced Micro Devices, Inc.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+   * Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+AMD CCP Poll Mode Driver
+========================
+
+The CCP poll mode driver library (librte_pmd_ccp) implements support for
+AMD’s cryptographic co-processor (CCP). The CCP PMD is a virtual crypto
+PMD that schedules crypto operations to one or more available CCP hardware
+engines on the platform. It provides poll mode crypto driver support for
+the following hardware accelerator devices::
+
+	AMD Cryptographic Co-processor (0x1456)
+	AMD Cryptographic Co-processor (0x1468)
+
+Features
+--------
+
+CCP crypto PMD has support for:
+
+Cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES_ECB``
+* ``RTE_CRYPTO_CIPHER_AES_CTR``
+* ``RTE_CRYPTO_CIPHER_3DES_CBC``
+
+Hash algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1``
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA384``
+* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA512``
+* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+* ``RTE_CRYPTO_AUTH_AES_CMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_224``
+* ``RTE_CRYPTO_AUTH_SHA3_224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_256``
+* ``RTE_CRYPTO_AUTH_SHA3_256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_384``
+* ``RTE_CRYPTO_AUTH_SHA3_384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_512``
+* ``RTE_CRYPTO_AUTH_SHA3_512_HMAC``
+
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
+Installation
+------------
+
+To compile the CCP PMD, enable it in the ``config/common_base`` file:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP=y``
+
+The CCP PMD also supports computing authentication on the CPU while the
+cipher is offloaded to the CCP. To enable this feature, set the following
+in the configuration:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y``
+
+The CCP PMD has been verified on Ubuntu 16.04.
+
+Initialization
+--------------
+
+Bind the CCP devices to the DPDK UIO driver module before running the CCP
+PMD. For example, for the 0x1456 device::
+
+	cd to the top-level DPDK directory
+	modprobe uio
+	insmod ./build/kmod/igb_uio.ko
+	echo "1022 1456" > /sys/bus/pci/drivers/igb_uio/new_id
+
+Another way to bind the CCP devices to the DPDK UIO driver is with the
+``dpdk-devbind.py`` script. The following command assumes a ``BDF`` of
+``0000:09:00.2``::
+
+	cd to the top-level DPDK directory
+	./usertools/dpdk-devbind.py -b igb_uio 0000:09:00.2
+
+To verify real traffic, the l2fwd-crypto example can be used with the following command:
+
+.. code-block:: console
+
+	sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp" -- -p 0x1
+	--chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo AES_CBC
+	--cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+	--iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
+	--auth_op GENERATE --auth_algo SHA1_HMAC
+	--auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+	:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+	:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+
+Limitations
+-----------
+
+* Chained mbufs are not supported
+* MD5_HMAC is supported only when ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y`` is enabled in the configuration
diff --git a/doc/guides/cryptodevs/features/ccp.ini b/doc/guides/cryptodevs/features/ccp.ini
new file mode 100644
index 0000000..add4bd8
--- /dev/null
+++ b/doc/guides/cryptodevs/features/ccp.ini
@@ -0,0 +1,57 @@
+;
+; Supported features of the 'ccp' crypto poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto       = Y
+Sym operation chaining = Y
+HW Accelerated         = Y
+
+;
+; Supported crypto algorithms of the 'ccp' crypto driver.
+;
+[Cipher]
+AES CBC (128)  = Y
+AES CBC (192)  = Y
+AES CBC (256)  = Y
+AES ECB (128)  = Y
+AES ECB (192)  = Y
+AES ECB (256)  = Y
+AES CTR (128)  = Y
+AES CTR (192)  = Y
+AES CTR (256)  = Y
+3DES CBC       = Y
+
+;
+; Supported authentication algorithms of the 'ccp' crypto driver.
+;
+[Auth]
+MD5 HMAC       = Y
+SHA1           = Y
+SHA1 HMAC      = Y
+SHA224         = Y
+SHA224 HMAC    = Y
+SHA256         = Y
+SHA256 HMAC    = Y
+SHA384         = Y
+SHA384 HMAC    = Y
+SHA512         = Y
+SHA512 HMAC    = Y
+AES CMAC       = Y
+SHA3_224       = Y
+SHA3_224 HMAC  = Y
+SHA3_256       = Y
+SHA3_256 HMAC  = Y
+SHA3_384       = Y
+SHA3_384 HMAC  = Y
+SHA3_512       = Y
+SHA3_512 HMAC  = Y
+
+;
+; Supported AEAD algorithms of the 'ccp' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 728ce3b..aa1ca31 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -28,6 +28,9 @@ NULL           =
 AES CBC (128)  =
 AES CBC (192)  =
 AES CBC (256)  =
+AES ECB (128)  =
+AES ECB (192)  =
+AES ECB (256)  =
 AES CTR (128)  =
 AES CTR (192)  =
 AES CTR (256)  =
@@ -62,6 +65,15 @@ AES GMAC     =
 SNOW3G UIA2  =
 KASUMI F9    =
 ZUC EIA3     =
+AES CMAC     =
+SHA3_224       =
+SHA3_224 HMAC  =
+SHA3_256       =
+SHA3_256 HMAC  =
+SHA3_384       =
+SHA3_384 HMAC  =
+SHA3_512       =
+SHA3_512 HMAC  =
 
 ;
 ; Supported AEAD algorithms of a default crypto driver.
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index 558c926..8a921dd 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -13,6 +13,7 @@ Crypto Device Drivers
     aesni_mb
     aesni_gcm
     armv8
+    ccp
     dpaa2_sec
     dpaa_sec
     kasumi
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
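
For readers mapping the documented l2fwd-crypto invocation onto the
cryptodev API: the CIPHER_HASH chain corresponds to a cipher xform linked
to an auth xform in one symmetric session. The sketch below is a minimal
illustration against the cryptodev API of this era, not code from the
driver; the helper name, the 16-byte cipher key and 64-byte HMAC key
lengths, and the caller-provided session mempool are assumptions:

	/* Sketch only: AES-CBC + SHA1-HMAC chained session on the ccp
	 * vdev, mirroring the documented l2fwd-crypto invocation.
	 */
	#include <rte_cryptodev.h>
	#include <rte_crypto_sym.h>

	#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
			   sizeof(struct rte_crypto_sym_op))

	static struct rte_cryptodev_sym_session *
	create_ccp_chain_session(uint8_t dev_id, struct rte_mempool *sess_mp,
				 uint8_t *cipher_key, uint8_t *auth_key)
	{
		struct rte_crypto_sym_xform auth_xform = {
			.type = RTE_CRYPTO_SYM_XFORM_AUTH,
			.auth = {
				.op = RTE_CRYPTO_AUTH_OP_GENERATE,
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.key = { .data = auth_key, .length = 64 },
				.digest_length = 20,
			},
		};
		struct rte_crypto_sym_xform cipher_xform = {
			.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			.next = &auth_xform,	/* CIPHER_HASH: cipher, then auth */
			.cipher = {
				.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.key = { .data = cipher_key, .length = 16 },
				.iv = { .offset = IV_OFFSET, .length = 16 },
			},
		};
		struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_mp);

		if (sess == NULL)
			return NULL;
		if (rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
						   sess_mp) < 0) {
			rte_cryptodev_sym_session_free(sess);
			return NULL;
		}
		return sess;
	}

The device itself is instantiated at EAL startup with ``--vdev "crypto_ccp"``,
as in the l2fwd-crypto example above.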

* [PATCH v4 20/20] crypto/ccp: moved license headers to SPDX format
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (17 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 19/20] doc: add document for AMD CCP crypto poll mode driver Ravi Kumar
@ 2018-03-09  8:35       ` Ravi Kumar
  2018-03-12  9:18         ` De Lara Guarch, Pablo
  2018-03-09 17:36       ` [PATCH v4 01/20] crypto/ccp: add AMD ccp skeleton PMD Hemant Agrawal
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
  20 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2018-03-09  8:35 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 doc/guides/cryptodevs/ccp.rst        | 29 ++---------------------------
 drivers/crypto/ccp/Makefile          | 30 ++----------------------------
 drivers/crypto/ccp/ccp_crypto.c      | 29 ++---------------------------
 drivers/crypto/ccp/ccp_crypto.h      | 29 ++---------------------------
 drivers/crypto/ccp/ccp_dev.c         | 29 ++---------------------------
 drivers/crypto/ccp/ccp_dev.h         | 29 ++---------------------------
 drivers/crypto/ccp/ccp_pci.c         | 29 ++---------------------------
 drivers/crypto/ccp/ccp_pci.h         | 29 ++---------------------------
 drivers/crypto/ccp/ccp_pmd_ops.c     | 29 ++---------------------------
 drivers/crypto/ccp/ccp_pmd_private.h | 29 ++---------------------------
 drivers/crypto/ccp/rte_ccp_pmd.c     | 29 ++---------------------------
 11 files changed, 22 insertions(+), 298 deletions(-)

diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
index 59b9281..9c0e428 100644
--- a/doc/guides/cryptodevs/ccp.rst
+++ b/doc/guides/cryptodevs/ccp.rst
@@ -1,30 +1,5 @@
-.. Copyright(c) 2017 Advanced Micro Devices, Inc.
-   All rights reserved.
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions
-   are met:
-
-   * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in the
-   documentation and/or other materials provided with the distribution.
-   * Neither the name of the copyright holder nor the names of its
-   contributors may be used to endorse or promote products derived from
-   this software without specific prior written permission.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.. Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+   SPDX-License-Identifier: BSD-3-Clause
 
 AMD CCP Poll Mode Driver
 ========================
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 5241465..1475a6c 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,31 +1,5 @@
-#
-#   Copyright(c) 2018 Advanced Micro Devices, Inc.
-#   All rights reserved.
-#
-#   Redistribution and use in source and binary forms, with or without
-#   modification, are permitted provided that the following conditions
-#   are met:
-#
-# 	* Redistributions of source code must retain the above copyright
-# 	notice, this list of conditions and the following disclaimer.
-# 	* Redistributions in binary form must reproduce the above copyright
-# 	notice, this list of conditions and the following disclaimer in the
-# 	documentation and/or other materials provided with the distribution.
-# 	* Neither the name of the copyright holder nor the names of its
-# 	contributors may be used to endorse or promote products derived from
-# 	this software without specific prior written permission.
-#
-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-#   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+#   SPDX-License-Identifier: BSD-3-Clause
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 133db76..615a8c5 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -1,31 +1,6 @@
 /*-
- *   Copyright(c) 2018 Advanced Micro Devices, Inc.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *	* Redistributions of source code must retain the above copyright
- *	notice, this list of conditions and the following disclaimer.
- *	* Redistributions in binary form must reproduce the above copyright
- *	notice, this list of conditions and the following disclaimer in the
- *	documentation and/or other materials provided with the distribution.
- *	* Neither the name of the copyright holder nor the names of its
- *	contributors may be used to endorse or promote products derived from
- *	this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ *   SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <dirent.h>
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index f526329..6a4fa69 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -1,31 +1,6 @@
 /*-
- *   Copyright(c) 2018 Advanced Micro Devices, Inc.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *	* Redistributions of source code must retain the above copyright
- *	notice, this list of conditions and the following disclaimer.
- *	* Redistributions in binary form must reproduce the above copyright
- *	notice, this list of conditions and the following disclaimer in the
- *	documentation and/or other materials provided with the distribution.
- *	* Neither the name of the copyright holder nor the names of its
- *	contributors may be used to endorse or promote products derived from
- *	this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ *   SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef _CCP_CRYPTO_H_
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index d8c0ab4..7ad1227 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -1,31 +1,6 @@
 /*-
- *   Copyright(c) 2018 Advanced Micro Devices, Inc.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *	* Redistributions of source code must retain the above copyright
- *	notice, this list of conditions and the following disclaimer.
- *	* Redistributions in binary form must reproduce the above copyright
- *	notice, this list of conditions and the following disclaimer in the
- *	documentation and/or other materials provided with the distribution.
- *	* Neither the name of the copyright holder nor the names of its
- *	contributors may be used to endorse or promote products derived from
- *	this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ *   SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <dirent.h>
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index 759afc1..6f3ad5b 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -1,31 +1,6 @@
 /*-
- *   Copyright(c) 2018 Advanced Micro Devices, Inc.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *	* Redistributions of source code must retain the above copyright
- *	notice, this list of conditions and the following disclaimer.
- *	* Redistributions in binary form must reproduce the above copyright
- *	notice, this list of conditions and the following disclaimer in the
- *	documentation and/or other materials provided with the distribution.
- *	* Neither the name of the copyright holder nor the names of its
- *	contributors may be used to endorse or promote products derived from
- *	this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ *   SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef _CCP_DEV_H_
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
index ddf4b49..d5e0eeb 100644
--- a/drivers/crypto/ccp/ccp_pci.c
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -1,31 +1,6 @@
 /*-
- *   Copyright(c) 2018 Advanced Micro Devices, Inc.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *	* Redistributions of source code must retain the above copyright
- *	notice, this list of conditions and the following disclaimer.
- *	* Redistributions in binary form must reproduce the above copyright
- *	notice, this list of conditions and the following disclaimer in the
- *	documentation and/or other materials provided with the distribution.
- *	* Neither the name of the copyright holder nor the names of its
- *	contributors may be used to endorse or promote products derived from
- *	this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ *   SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <dirent.h>
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
index a4c09c8..8f98976 100644
--- a/drivers/crypto/ccp/ccp_pci.h
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -1,31 +1,6 @@
 /*-
- *   Copyright(c) 2018 Advanced Micro Devices, Inc.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *	* Redistributions of source code must retain the above copyright
- *	notice, this list of conditions and the following disclaimer.
- *	* Redistributions in binary form must reproduce the above copyright
- *	notice, this list of conditions and the following disclaimer in the
- *	documentation and/or other materials provided with the distribution.
- *	* Neither the name of the copyright holder nor the names of its
- *	contributors may be used to endorse or promote products derived from
- *	this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ *   SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef _CCP_PCI_H_
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 1b67070..9acde00 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -1,31 +1,6 @@
 /*-
- *   Copyright(c) 2018 Advanced Micro Devices, Inc.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *	* Redistributions of source code must retain the above copyright
- *	notice, this list of conditions and the following disclaimer.
- *	* Redistributions in binary form must reproduce the above copyright
- *	notice, this list of conditions and the following disclaimer in the
- *	documentation and/or other materials provided with the distribution.
- *	* Neither the name of the copyright holder nor the names of its
- *	contributors may be used to endorse or promote products derived from
- *	this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ *   SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <string.h>
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
index cd9f6ae..6199849 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -1,31 +1,6 @@
 /*-
- *   Copyright(c) 2018 Advanced Micro Devices, Inc.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *	* Redistributions of source code must retain the above copyright
- *	notice, this list of conditions and the following disclaimer.
- *	* Redistributions in binary form must reproduce the above copyright
- *	notice, this list of conditions and the following disclaimer in the
- *	documentation and/or other materials provided with the distribution.
- *	* Neither the name of the copyright holder nor the names of its
- *	contributors may be used to endorse or promote products derived from
- *	this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ *   SPDX-License-Identifier: BSD-3-Clause
  */
 
 #ifndef _CCP_PMD_PRIVATE_H_
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 23d3af3..bb4d241 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -1,31 +1,6 @@
 /*-
- *   Copyright(c) 2018 Advanced Micro Devices, Inc.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *	* Redistributions of source code must retain the above copyright
- *	notice, this list of conditions and the following disclaimer.
- *	* Redistributions in binary form must reproduce the above copyright
- *	notice, this list of conditions and the following disclaimer in the
- *	documentation and/or other materials provided with the distribution.
- *	* Neither the name of the copyright holder nor the names of its
- *	contributors may be used to endorse or promote products derived from
- *	this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ *   SPDX-License-Identifier: BSD-3-Clause
  */
 
 #include <rte_bus_pci.h>
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* Re: [PATCH v4 01/20] crypto/ccp: add AMD ccp skeleton PMD
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (18 preceding siblings ...)
  2018-03-09  8:35       ` [PATCH v4 20/20] crypto/ccp: moved license headers to SPDX format Ravi Kumar
@ 2018-03-09 17:36       ` Hemant Agrawal
  2018-03-09 17:46         ` Hemant Agrawal
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
  20 siblings, 1 reply; 131+ messages in thread
From: Hemant Agrawal @ 2018-03-09 17:36 UTC (permalink / raw)
  To: Ravi Kumar, dev; +Cc: pablo.de.lara.guarch

Hi Ravi,
	Since 18.02, the DPDK community prefers all new submissions to use SPDX
licensing tags.
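
For reference, the SPDX form condenses roughly 25 lines of BSD-3-Clause
boilerplate into a two-line header; the tag style adopted later in this
series looks like:

	/*   SPDX-License-Identifier: BSD-3-Clause
	 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
	 */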

Regards,
Hemant
On 3/9/2018 2:05 PM, Ravi Kumar wrote:
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
> ---
<snip>..
> diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
> new file mode 100644
> index 0000000..51c5e5b
> --- /dev/null
> +++ b/drivers/crypto/ccp/Makefile
> @@ -0,0 +1,55 @@
> +#
> +#   Copyright(c) 2018 Advanced Micro Devices, Inc.
> +#   All rights reserved.
> +#
> +#   Redistribution and use in source and binary forms, with or without
> +#   modification, are permitted provided that the following conditions
> +#   are met:
> +#
> +# 	* Redistributions of source code must retain the above copyright
> +# 	notice, this list of conditions and the following disclaimer.
> +# 	* Redistributions in binary form must reproduce the above copyright
> +# 	notice, this list of conditions and the following disclaimer in the
> +# 	documentation and/or other materials provided with the distribution.
> +# 	* Neither the name of the copyright holder nor the names of its
> +# 	contributors may be used to endorse or promote products derived from
> +# 	this software without specific prior written permission.
> +#
> +#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> +#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> +#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> +#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> +#   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> +#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> +#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> +#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> +#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> +#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> +#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v4 01/20] crypto/ccp: add AMD ccp skeleton PMD
  2018-03-09 17:36       ` [PATCH v4 01/20] crypto/ccp: add AMD ccp skeleton PMD Hemant Agrawal
@ 2018-03-09 17:46         ` Hemant Agrawal
  2018-03-12 11:26           ` Kumar, Ravi1
  0 siblings, 1 reply; 131+ messages in thread
From: Hemant Agrawal @ 2018-03-09 17:46 UTC (permalink / raw)
  To: Hemant Agrawal, Ravi Kumar, dev; +Cc: pablo.de.lara.guarch

Please ignore my comment.

I just checked that you have added a patch to convert the license to SPDX.

However, that is not the best way: a new patchset should have this change in the individual patches themselves.

Regards,
Hemant

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Hemant Agrawal
> Sent: Friday, March 09, 2018 11:06 PM
> To: Ravi Kumar <Ravi1.kumar@amd.com>; dev@dpdk.org
> Cc: pablo.de.lara.guarch@intel.com
> Subject: Re: [dpdk-dev] [PATCH v4 01/20] crypto/ccp: add AMD ccp skeleton
> PMD
> 
> Hi Ravi,
> 	Since 18.02, DPDK community will prefer all new submission to use SPDX
> licensing tags.
> 
> Regards,
> Hemant
> On 3/9/2018 2:05 PM, Ravi Kumar wrote:
> > Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
> > ---
> <snip>..
> > diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
> > new file mode 100644 index 0000000..51c5e5b
> > --- /dev/null
> > +++ b/drivers/crypto/ccp/Makefile
> > @@ -0,0 +1,55 @@
> > +#
> > +#   Copyright(c) 2018 Advanced Micro Devices, Inc.
> > +#   All rights reserved.
> > +#
> > +#   Redistribution and use in source and binary forms, with or without
> > +#   modification, are permitted provided that the following conditions
> > +#   are met:
> > +#
> > +# 	* Redistributions of source code must retain the above copyright
> > +# 	notice, this list of conditions and the following disclaimer.
> > +# 	* Redistributions in binary form must reproduce the above copyright
> > +# 	notice, this list of conditions and the following disclaimer in the
> > +# 	documentation and/or other materials provided with the distribution.
> > +# 	* Neither the name of the copyright holder nor the names of its
> > +# 	contributors may be used to endorse or promote products derived from
> > +# 	this software without specific prior written permission.
> > +#
> > +#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> > +#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> > +#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> > +#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> > +#   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> > +#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
> NOT
> > +#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
> OF USE,
> > +#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
> AND ON ANY
> > +#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
> TORT
> > +#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> THE USE
> > +#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v4 20/20] crypto/ccp: moved license headers to SPDX format
  2018-03-09  8:35       ` [PATCH v4 20/20] crypto/ccp: moved license headers to SPDX format Ravi Kumar
@ 2018-03-12  9:18         ` De Lara Guarch, Pablo
  2018-03-12 11:23           ` Kumar, Ravi1
  0 siblings, 1 reply; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-03-12  9:18 UTC (permalink / raw)
  To: Ravi Kumar, dev

Hi Ravi,

> -----Original Message-----
> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> Sent: Friday, March 9, 2018 8:35 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> Subject: [PATCH v4 20/20] crypto/ccp: moved license headers to SPDX
> format

As Ferruh suggested for "axgbe", could you directly use the SPDX license headers in the other patches
and get rid of this one?

Thanks,
Pablo

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v4 20/20] crypto/ccp: moved license headers to SPDX format
  2018-03-12  9:18         ` De Lara Guarch, Pablo
@ 2018-03-12 11:23           ` Kumar, Ravi1
  0 siblings, 0 replies; 131+ messages in thread
From: Kumar, Ravi1 @ 2018-03-12 11:23 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, dev



>-----Original Message-----
>From: De Lara Guarch, Pablo [mailto:pablo.de.lara.guarch@intel.com] 
>Sent: Monday, March 12, 2018 2:49 PM
>To: Kumar, Ravi1 <Ravi1.Kumar@amd.com>; dev@dpdk.org
>Subject: RE: [PATCH v4 20/20] crypto/ccp: moved license headers to SPDX format
>
>Hi Ravi,
>
>> -----Original Message-----
>> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
>> Sent: Friday, March 9, 2018 8:35 AM
>> To: dev@dpdk.org
>> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
>> Subject: [PATCH v4 20/20] crypto/ccp: moved license headers to SPDX 
>> format
>
>As Ferruh suggested for "axgbe", could you use directly the SPDX license headers in the other patches and get rid of this one?
>
>Thanks,
>Pablo
>

Hi Pablo,

Sure, we will do that. The next version of the patches will take care of this.

Regards,
Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v4 01/20] crypto/ccp: add AMD ccp skeleton PMD
  2018-03-09 17:46         ` Hemant Agrawal
@ 2018-03-12 11:26           ` Kumar, Ravi1
  0 siblings, 0 replies; 131+ messages in thread
From: Kumar, Ravi1 @ 2018-03-12 11:26 UTC (permalink / raw)
  To: Hemant Agrawal, dev; +Cc: pablo.de.lara.guarch

>
>
>-----Original Message-----
>From: Hemant Agrawal [mailto:hemant.agrawal@nxp.com] 
>Sent: Friday, March 09, 2018 11:17 PM
>To: Hemant Agrawal <hemant.agrawal@nxp.com>; Kumar, Ravi1 <Ravi1.Kumar@amd.com>; dev@dpdk.org
>Cc: pablo.de.lara.guarch@intel.com
>Subject: RE: [dpdk-dev] [PATCH v4 01/20] crypto/ccp: add AMD ccp skeleton PMD
>
>Please ignore my comment, 
>
>I just checked that you have added a patch to convert the license to SPDX. 
>
>However, it is not the best  way. A new patchset should have this change in the individual patches itself.
>
>Regards,
>Hemant


Hi Hemant,

Thanks for reviewing. We will fix this in the next version of the patches. 

Regards,
Ravi
>
>> -----Original Message-----
>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Hemant Agrawal
>> Sent: Friday, March 09, 2018 11:06 PM
>> To: Ravi Kumar <Ravi1.kumar@amd.com>; dev@dpdk.org
>> Cc: pablo.de.lara.guarch@intel.com
>> Subject: Re: [dpdk-dev] [PATCH v4 01/20] crypto/ccp: add AMD ccp 
>> skeleton PMD
>> 
>> Hi Ravi,
>> 	Since 18.02, DPDK community will prefer all new submission to use 
>> SPDX licensing tags.
>> 
>> Regards,
>> Hemant
>> On 3/9/2018 2:05 PM, Ravi Kumar wrote:
>> > Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
>> > ---
>> <snip>..
>> > diff --git a/drivers/crypto/ccp/Makefile 
>> > b/drivers/crypto/ccp/Makefile new file mode 100644 index 
>> > 0000000..51c5e5b
>> > --- /dev/null
>> > +++ b/drivers/crypto/ccp/Makefile
>> > @@ -0,0 +1,55 @@
>> > +#
>> > +#   Copyright(c) 2018 Advanced Micro Devices, Inc.
>> > +#   All rights reserved.
>> > +#
>> > +#   Redistribution and use in source and binary forms, with or without
>> > +#   modification, are permitted provided that the following conditions
>> > +#   are met:
>> > +#
>> > +# 	* Redistributions of source code must retain the above copyright
>> > +# 	notice, this list of conditions and the following disclaimer.
>> > +# 	* Redistributions in binary form must reproduce the above copyright
>> > +# 	notice, this list of conditions and the following disclaimer in the
>> > +# 	documentation and/or other materials provided with the distribution.
>> > +# 	* Neither the name of the copyright holder nor the names of its
>> > +# 	contributors may be used to endorse or promote products derived from
>> > +# 	this software without specific prior written permission.
>> > +#
>> > +#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
>> CONTRIBUTORS
>> > +#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
>> NOT
>> > +#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
>> FITNESS FOR
>> > +#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
>> COPYRIGHT
>> > +#   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
>> INCIDENTAL,
>> > +#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
>> NOT
>> > +#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
>> OF USE,
>> > +#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
>> AND ON ANY
>> > +#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
>> TORT
>> > +#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
>> THE USE
>> > +#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
>> DAMAGE.
>

^ permalink raw reply	[flat|nested] 131+ messages in thread

* [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
                         ` (19 preceding siblings ...)
  2018-03-09 17:36       ` [PATCH v4 01/20] crypto/ccp: add AMD ccp skeleton PMD Hemant Agrawal
@ 2018-03-19 12:23       ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 02/19] crypto/ccp: support ccp device initialization and deinitialization Ravi Kumar
                           ` (19 more replies)
  20 siblings, 20 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 MAINTAINERS                                |  6 +++++
 config/common_base                         |  5 +++++
 doc/guides/rel_notes/release_18_05.rst     |  5 +++++
 drivers/crypto/Makefile                    |  1 +
 drivers/crypto/ccp/Makefile                | 29 ++++++++++++++++++++++++
 drivers/crypto/ccp/rte_ccp_pmd.c           | 36 ++++++++++++++++++++++++++++++
 drivers/crypto/ccp/rte_pmd_ccp_version.map |  4 ++++
 mk/rte.app.mk                              |  2 ++
 8 files changed, 88 insertions(+)
 create mode 100644 drivers/crypto/ccp/Makefile
 create mode 100644 drivers/crypto/ccp/rte_ccp_pmd.c
 create mode 100644 drivers/crypto/ccp/rte_pmd_ccp_version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index a646ca3..8481731 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -640,6 +640,12 @@ M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 F: doc/guides/cryptodevs/features/default.ini
 
+AMD CCP Crypto PMD
+M: Ravi Kumar <ravi1.kumar@amd.com>
+F: drivers/crypto/ccp/
+F: doc/guides/cryptodevs/ccp.rst
+F: doc/guides/cryptodevs/features/ccp.ini
+
 ARMv8 Crypto
 M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
 F: drivers/crypto/armv8/
diff --git a/config/common_base b/config/common_base
index ad03cf4..28237f0 100644
--- a/config/common_base
+++ b/config/common_base
@@ -529,6 +529,11 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
 CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 
 #
+# Compile PMD for AMD CCP crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_CCP=n
+
+#
 # Compile PMD for Marvell Crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n
diff --git a/doc/guides/rel_notes/release_18_05.rst b/doc/guides/rel_notes/release_18_05.rst
index 3923dc2..c5b2854 100644
--- a/doc/guides/rel_notes/release_18_05.rst
+++ b/doc/guides/rel_notes/release_18_05.rst
@@ -41,6 +41,11 @@ New Features
      Also, make sure to start the actual text at the margin.
      =========================================================
 
+* **Added a new crypto poll mode driver for AMD CCP devices.**
+
+  Added the new ``ccp`` crypto driver for AMD CCP devices. See the
+  :doc:`../cryptodevs/ccp` crypto driver guide for more details on
+  this new driver.
 
 API Changes
 -----------
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 26e503e..9fbd986 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -20,5 +20,6 @@ endif
 ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
 endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 0000000..3b8442d
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,29 @@
+#   SPDX-License-Identifier: BSD-3-Clause
+#   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library include paths
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 0000000..71e7023
--- /dev/null
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,36 @@
+/*   SPDX-License-Identifier: BSD-3-Clause
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+
+uint8_t ccp_cryptodev_driver_id;
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev __rte_unused)
+{
+	return 0;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev __rte_unused)
+{
+	return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+	.probe = cryptodev_ccp_probe,
+	.remove = cryptodev_ccp_remove
+};
+
+static struct cryptodev_driver ccp_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+	"max_nb_queue_pairs=<int> max_nb_sessions=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv,
+			       ccp_cryptodev_driver_id);
diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 0000000..9b9ab1a
--- /dev/null
+++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+	local: *;
+};
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 94525dc..6f50a15 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -221,6 +221,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC)   += -lrte_pmd_dpaa_sec
 endif # CONFIG_RTE_LIBRTE_DPAA_BUS
 
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CCP)   += -lrte_pmd_ccp -lcrypto
+
 endif # CONFIG_RTE_LIBRTE_CRYPTODEV
 
 ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
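
As a usage sketch (not part of the patch set): once this skeleton registers
the vdev driver, an application could instantiate the PMD from the EAL
command line, e.g. --vdev "crypto_ccp,max_nb_queue_pairs=4,socket_id=0",
or programmatically. The device name "crypto_ccp" is an assumption here;
the CRYPTODEV_NAME_CCP_PMD macro is defined outside this patch.

	#include <rte_bus_vdev.h>

	/* sketch: create a ccp crypto vdev with the parameters the PMD
	 * declares in RTE_PMD_REGISTER_PARAM_STRING above
	 */
	static int
	create_ccp_vdev(void)
	{
		return rte_vdev_init("crypto_ccp",
			"max_nb_queue_pairs=4,max_nb_sessions=2048,socket_id=0");
	}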

* [PATCH v5 02/19] crypto/ccp: support ccp device initialization and deinitialization
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 03/19] crypto/ccp: support basic pmd ops Ravi Kumar
                           ` (18 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/Makefile          |   3 +
 drivers/crypto/ccp/ccp_dev.c         | 761 +++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h         | 284 +++++++++++++
 drivers/crypto/ccp/ccp_pci.c         | 236 +++++++++++
 drivers/crypto/ccp/ccp_pci.h         |  27 ++
 drivers/crypto/ccp/ccp_pmd_ops.c     |  29 ++
 drivers/crypto/ccp/ccp_pmd_private.h |  56 +++
 drivers/crypto/ccp/rte_ccp_pmd.c     | 151 ++++++-
 8 files changed, 1545 insertions(+), 2 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp_dev.c
 create mode 100644 drivers/crypto/ccp/ccp_dev.h
 create mode 100644 drivers/crypto/ccp/ccp_pci.c
 create mode 100644 drivers/crypto/ccp/ccp_pci.h
 create mode 100644 drivers/crypto/ccp/ccp_pmd_ops.c
 create mode 100644 drivers/crypto/ccp/ccp_pmd_private.h

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 3b8442d..f121712 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -25,5 +25,8 @@ EXPORT_MAP := rte_pmd_ccp_version.map
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 0000000..42cff75
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,761 @@
+/*   SPDX-License-Identifier: BSD-3-Clause
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+			   uint32_t queue_size,
+			   int socket_id)
+{
+	const struct rte_memzone *mz;
+	unsigned int memzone_flags = 0;
+	const struct rte_memseg *ms;
+
+	mz = rte_memzone_lookup(queue_name);
+	if (mz != 0)
+		return mz;
+
+	ms = rte_eal_get_physmem_layout();
+	switch (ms[0].hugepage_sz) {
+	case(RTE_PGSIZE_2M):
+		memzone_flags = RTE_MEMZONE_2MB;
+		break;
+	case(RTE_PGSIZE_1G):
+		memzone_flags = RTE_MEMZONE_1GB;
+		break;
+	case(RTE_PGSIZE_16M):
+		memzone_flags = RTE_MEMZONE_16MB;
+		break;
+	case(RTE_PGSIZE_16G):
+		memzone_flags = RTE_MEMZONE_16GB;
+		break;
+	default:
+		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
+	}
+
+	return rte_memzone_reserve_aligned(queue_name,
+					   queue_size,
+					   socket_id,
+					   memzone_flags,
+					   queue_size);
+}
+
+/* bitmap support apis */
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
+}
+
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+	unsigned long first_zero;
+
+	first_zero = __builtin_ffsl(~word);
+	return first_zero ? (first_zero - 1) :
+		BITS_PER_WORD;
+}
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+	uint32_t i;
+	uint32_t nwords = 0;
+
+	nwords = (limit - 1) / BITS_PER_WORD + 1;
+	for (i = 0; i < nwords; i++) {
+		if (addr[i] == 0UL)
+			return i * BITS_PER_WORD;
+		if (addr[i] < ~(0UL))
+			break;
+	}
+	return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_WORD;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_clear >= 0) {
+		*p &= ~mask_to_clear;
+		len -= bits_to_clear;
+		bits_to_clear = BITS_PER_WORD;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p &= ~mask_to_clear;
+	}
+}
+
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+		   unsigned long nbits,
+		   unsigned long start,
+		   unsigned long invert)
+{
+	unsigned long tmp;
+
+	if (!nbits || start >= nbits)
+		return nbits;
+
+	tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+	/* Handle 1st word. */
+	tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+	start = ccp_round_down(start, BITS_PER_WORD);
+
+	while (!tmp) {
+		start += BITS_PER_WORD;
+		if (start >= nbits)
+			return nbits;
+
+		tmp = addr[start / BITS_PER_WORD] ^ invert;
+	}
+
+	return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+		  unsigned long size,
+		  unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+		       unsigned long size,
+		       unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+			       unsigned long size,
+			       unsigned long start,
+			       unsigned int nr)
+{
+	unsigned long index, end, i;
+
+again:
+	index = ccp_find_next_zero_bit(map, size, start);
+
+	end = index + nr;
+	if (end > size)
+		return end;
+	i = ccp_find_next_bit(map, end, index);
+	if (i < end) {
+		start = i + 1;
+		goto again;
+	}
+	return index;
+}
+
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+	struct ccp_device *ccp;
+	int start;
+
+	/* First look at the map for the queue */
+	if (cmd_q->lsb >= 0) {
+		start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+								 LSB_SIZE, 0,
+								 count);
+		if (start < LSB_SIZE) {
+			ccp_bitmap_set(cmd_q->lsbmap, start, count);
+			return start + cmd_q->lsb * LSB_SIZE;
+		}
+	}
+
+	/* try to get an entry from the shared blocks */
+	ccp = cmd_q->dev;
+
+	rte_spinlock_lock(&ccp->lsb_lock);
+
+	start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+						    MAX_LSB_CNT * LSB_SIZE,
+						    0, count);
+	if (start <= MAX_LSB_CNT * LSB_SIZE) {
+		ccp_bitmap_set(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+		return start * LSB_ITEM_SIZE;
+	}
+	CCP_LOG_ERR("NO LSBs available");
+
+	rte_spinlock_unlock(&ccp->lsb_lock);
+
+	return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+	     unsigned int start,
+	     unsigned int count)
+{
+	int lsbno = start / LSB_SIZE;
+
+	if (!start)
+		return;
+
+	if (cmd_q->lsb == lsbno) {
+		/* An entry from the private LSB */
+		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+	} else {
+		/* From the shared LSBs */
+		struct ccp_device *ccp = cmd_q->dev;
+
+		rte_spinlock_lock(&ccp->lsb_lock);
+		ccp_bitmap_clear(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+	}
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+	int q_mask = 1 << cmd_q->id;
+	int weight = 0;
+	int j;
+
+	/* Build a bit mask to know which LSBs
+	 * this queue has access to. Don't bother with
+	 * segment 0 as it has special privileges.
+	 */
+	cmd_q->lsbmask = 0;
+	status >>= LSB_REGION_WIDTH;
+	for (j = 1; j < MAX_LSB_CNT; j++) {
+		if (status & q_mask)
+			ccp_set_bit(&cmd_q->lsbmask, j);
+
+		status >>= LSB_REGION_WIDTH;
+	}
+
+	for (j = 0; j < MAX_LSB_CNT; j++)
+		if (ccp_get_bit(&cmd_q->lsbmask, j))
+			weight++;
+
+	printf("Queue %d can access %d LSB regions  of mask  %lu\n",
+	       (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+	return weight ? 0 : -EINVAL;
+}
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+			     int lsb_cnt, int n_lsbs,
+			     unsigned long *lsb_pub)
+{
+	unsigned long qlsb = 0;
+	int bitno = 0;
+	int qlsb_wgt = 0;
+	int i, j;
+
+	/* For each queue:
+	 * If the count of potential LSBs available to a queue matches the
+	 * ordinal given to us in lsb_cnt:
+	 * Copy the mask of possible LSBs for this queue into "qlsb";
+	 * For each bit in qlsb, see if the corresponding bit in the
+	 * aggregation mask is set; if so, we have a match.
+	 *     If we have a match, clear the bit in the aggregation to
+	 *     mark it as no longer available.
+	 *     If there is no match, clear the bit in qlsb and keep looking.
+	 */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+		qlsb_wgt = 0;
+		for (j = 0; j < MAX_LSB_CNT; j++)
+			if (ccp_get_bit(&cmd_q->lsbmask, j))
+				qlsb_wgt++;
+
+		if (qlsb_wgt == lsb_cnt) {
+			qlsb = cmd_q->lsbmask;
+
+			bitno = ffs(qlsb) - 1;
+			while (bitno < MAX_LSB_CNT) {
+				if (ccp_get_bit(lsb_pub, bitno)) {
+					/* We found an available LSB
+					 * that this queue can access
+					 */
+					cmd_q->lsb = bitno;
+					ccp_clear_bit(lsb_pub, bitno);
+					break;
+				}
+				ccp_clear_bit(&qlsb, bitno);
+				bitno = ffs(qlsb) - 1;
+			}
+			if (bitno >= MAX_LSB_CNT)
+				return -EINVAL;
+			n_lsbs--;
+		}
+	}
+	return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+	unsigned long lsb_pub = 0, qlsb = 0;
+	int n_lsbs = 0;
+	int bitno;
+	int i, lsb_cnt;
+	int rc = 0;
+
+	rte_spinlock_init(&ccp->lsb_lock);
+
+	/* Create an aggregate bitmap to get a total count of available LSBs */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+	for (i = 0; i < MAX_LSB_CNT; i++)
+		if (ccp_get_bit(&lsb_pub, i))
+			n_lsbs++;
+
+	if (n_lsbs >= ccp->cmd_q_count) {
+		/* We have enough LSBS to give every queue a private LSB.
+		 * Brute force search to start with the queues that are more
+		 * constrained in LSB choice. When an LSB is privately
+		 * assigned, it is removed from the public mask.
+		 * This is an ugly N squared algorithm with some optimization.
+		 */
+		for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+		     lsb_cnt++) {
+			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+							  &lsb_pub);
+			if (rc < 0)
+				return -EINVAL;
+			n_lsbs = rc;
+		}
+	}
+
+	rc = 0;
+	/* What's left of the LSBs, according to the public mask, now become
+	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
+	 * that can't be used as a shared resource, so mark the LSB slots for
+	 * them as "in use".
+	 */
+	qlsb = lsb_pub;
+	bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	while (bitno < MAX_LSB_CNT) {
+		ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+		ccp_set_bit(&qlsb, bitno);
+		bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	}
+
+	return rc;
+}
+
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+	int i;
+	uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+	uint64_t status;
+	struct ccp_queue *cmd_q;
+	const struct rte_memzone *q_mz;
+	void *vaddr;
+
+	if (dev == NULL)
+		return -1;
+
+	dev->id = ccp_dev_id++;
+	dev->qidx = 0;
+	vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+	if (type == CCP_VERSION_5B) {
+		CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+		CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+		for (i = 0; i < 12; i++) {
+			CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+				      CCP_READ_REG(vaddr, TRNG_OUT_REG));
+		}
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+		CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+	}
+	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+	/* Copy the private LSB mask to the public registers */
+	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+	status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+	status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo);
+
+	dev->cmd_q_count = 0;
+	/* Find available queues */
+	qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+	for (i = 0; i < MAX_HW_QUEUES; i++) {
+		if (!(qmr & (1 << i)))
+			continue;
+		cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+		cmd_q->dev = dev;
+		cmd_q->id = i;
+		cmd_q->qidx = 0;
+		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+		cmd_q->reg_base = (uint8_t *)vaddr +
+			CMD_Q_STATUS_INCR * (i + 1);
+
+		/* CCP queue memory */
+		snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+			 "%s_%d_%s_%d_%s",
+			 "ccp_dev",
+			 (int)dev->id, "queue",
+			 (int)cmd_q->id, "mem");
+		q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+						  cmd_q->qsize, SOCKET_ID_ANY);
+		cmd_q->qbase_addr = (void *)q_mz->addr;
+		cmd_q->qbase_desc = (void *)q_mz->addr;
+		cmd_q->qbase_phys_addr =  q_mz->phys_addr;
+
+		cmd_q->qcontrol = 0;
+		/* init control reg to zero */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* Disable the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+		/* Clear the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+			      ALL_INTERRUPTS);
+
+		/* Configure size of each virtual queue accessible to host */
+		cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+		dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+
+		dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+		cmd_q->qcontrol |= (dma_addr_hi << 16);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* create LSB Mask map */
+		if (ccp_find_lsb_regions(cmd_q, status))
+			CCP_LOG_ERR("queue doesn't have lsb regions");
+		cmd_q->lsb = -1;
+
+		rte_atomic64_init(&cmd_q->free_slots);
+		rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+		/* unused slot barrier b/w H&T */
+	}
+
+	if (ccp_assign_lsbs(dev))
+		CCP_LOG_ERR("Unable to assign lsb region");
+
+	/* pre-allocate LSB slots */
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->cmd_q[i].sb_key =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_iv =
+			ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_sha =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+		dev->cmd_q[i].sb_hmac =
+			ccp_lsb_alloc(&dev->cmd_q[i], 2);
+	}
+
+	TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+	return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+	if (dev == NULL)
+		return;
+
+	TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+	      const struct rte_pci_id *ccp_id,
+	      int *type)
+{
+	char filename[PATH_MAX];
+	const struct rte_pci_id *id;
+	uint16_t vendor, device_id;
+	int i;
+	unsigned long tmp;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	vendor = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	device_id = (uint16_t)tmp;
+
+	for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+		if (vendor == id->vendor_id &&
+		    device_id == id->device_id) {
+			*type = i;
+			return 1; /* Matched device */
+		}
+	}
+	return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+		 uint8_t bus, uint8_t devid,
+		 uint8_t function, int ccp_type)
+{
+	struct ccp_device *ccp_dev = NULL;
+	struct rte_pci_device *pci;
+	char filename[PATH_MAX];
+	unsigned long tmp;
+	int uio_fd = -1, i, uio_num;
+	char uio_devname[PATH_MAX];
+	void *map_addr;
+
+	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+			      RTE_CACHE_LINE_SIZE);
+	if (ccp_dev == NULL)
+		goto fail;
+	pci = &(ccp_dev->pci);
+
+	pci->addr.domain = domain;
+	pci->addr.bus = bus;
+	pci->addr.devid = devid;
+	pci->addr.function = function;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.vendor_id = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.device_id = (uint16_t)tmp;
+
+	/* get subsystem_vendor id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+	/* get subsystem_device id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_device",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_device_id = (uint16_t)tmp;
+
+	/* get class_id */
+	snprintf(filename, sizeof(filename), "%s/class",
+			dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	/* the least significant 24 bits are valid: class, subclass, program interface */
+	pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+	/* parse resources */
+	snprintf(filename, sizeof(filename), "%s/resource", dirname);
+	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+		goto fail;
+
+	uio_num = ccp_find_uio_devname(dirname);
+	if (uio_num < 0) {
+		/*
+		 * It may take time for uio device to appear,
+		 * wait  here and try again
+		 */
+		usleep(100000);
+		uio_num = ccp_find_uio_devname(dirname);
+		if (uio_num < 0)
+			goto fail;
+	}
+	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+	if (uio_fd < 0)
+		goto fail;
+	if (flock(uio_fd, LOCK_EX | LOCK_NB))
+		goto fail;
+
+	/* Map the PCI memory resource of device */
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+		char devname[PATH_MAX];
+		int res_fd;
+
+		if (pci->mem_resource[i].phys_addr == 0)
+			continue;
+		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+		res_fd = open(devname, O_RDWR);
+		if (res_fd < 0)
+			goto fail;
+		map_addr = mmap(NULL, pci->mem_resource[i].len,
+				PROT_READ | PROT_WRITE,
+				MAP_SHARED, res_fd, 0);
+		if (map_addr == MAP_FAILED)
+			goto fail;
+
+		pci->mem_resource[i].addr = map_addr;
+	}
+
+	/* device is valid, add in list */
+	if (ccp_add_device(ccp_dev, ccp_type)) {
+		ccp_remove_device(ccp_dev);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	CCP_LOG_ERR("CCP Device probe failed");
+	if (uio_fd > 0)
+		close(uio_fd);
+	if (ccp_dev)
+		rte_free(ccp_dev);
+	return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+	int dev_cnt = 0;
+	int ccp_type = 0;
+	struct dirent *d;
+	DIR *dir;
+	int ret = 0;
+	int module_idx = 0;
+	uint16_t domain;
+	uint8_t bus, devid, function;
+	char dirname[PATH_MAX];
+
+	module_idx = ccp_check_pci_uio_module();
+	if (module_idx < 0)
+		return -1;
+
+	TAILQ_INIT(&ccp_list);
+	dir = opendir(SYSFS_PCI_DEVICES);
+	if (dir == NULL)
+		return -1;
+	while ((d = readdir(dir)) != NULL) {
+		if (d->d_name[0] == '.')
+			continue;
+		if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+					&domain, &bus, &devid, &function) != 0)
+			continue;
+		snprintf(dirname, sizeof(dirname), "%s/%s",
+			     SYSFS_PCI_DEVICES, d->d_name);
+		if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+			printf("CCP : Detected CCP device with ID = 0x%x\n",
+			       ccp_id[ccp_type].device_id);
+			ret = ccp_probe_device(dirname, domain, bus, devid,
+					       function, ccp_type);
+			if (ret == 0)
+				dev_cnt++;
+		}
+	}
+	closedir(dir);
+	return dev_cnt;
+}
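
As an aside (not part of the patch): the LSB allocator above is a first-fit
search over a bitmap: ccp_bitmap_find_next_zero_area() locates a run of
clear bits and ccp_bitmap_set() marks it used. A simplified, self-contained
model of the same idea, using a single-word map and a plain bit loop instead
of the word-at-a-time helpers:

	#include <stdio.h>

	#define NBITS 16	/* one private LSB region: LSB_SIZE slots */

	/* find the first run of 'nr' clear bits; returns NBITS if none */
	static unsigned int
	find_zero_area(unsigned long map, unsigned int nr)
	{
		unsigned int start, i;

		for (start = 0; start + nr <= NBITS; start++) {
			for (i = 0; i < nr; i++)
				if (map & (1UL << (start + i)))
					break;
			if (i == nr)
				return start;	/* nr contiguous zero bits */
		}
		return NBITS;
	}

	int
	main(void)
	{
		unsigned long map = 0x0003;	/* slots 0 and 1 in use */
		unsigned int idx = find_zero_area(map, 2);

		if (idx < NBITS) {
			map |= ((1UL << 2) - 1) << idx;	/* mark slots used */
			printf("allocated slots %u-%u, map now 0x%04lx\n",
			       idx, idx + 1, map);	/* 2-3, 0x000f */
		}
		return 0;
	}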
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 0000000..f594e91
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,284 @@
+/*   SPDX-License-Identifier: BSD-3-Clause
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/**< CCP specific */
+#define MAX_HW_QUEUES                   5
+
+/**< CCP Register Mappings */
+#define Q_MASK_REG                      0x000
+#define TRNG_OUT_REG                    0x00c
+
+/* CCP Version 5 Specifics */
+#define CMD_QUEUE_MASK_OFFSET		0x00
+#define	CMD_QUEUE_PRIO_OFFSET		0x04
+#define CMD_REQID_CONFIG_OFFSET		0x08
+#define	CMD_CMD_TIMEOUT_OFFSET		0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET	0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET	0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET	0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET	0x24
+
+#define CMD_Q_CONTROL_BASE		0x0000
+#define CMD_Q_TAIL_LO_BASE		0x0004
+#define CMD_Q_HEAD_LO_BASE		0x0008
+#define CMD_Q_INT_ENABLE_BASE		0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE	0x0010
+
+#define CMD_Q_STATUS_BASE		0x0100
+#define CMD_Q_INT_STATUS_BASE		0x0104
+
+#define	CMD_CONFIG_0_OFFSET		0x6000
+#define	CMD_TRNG_CTL_OFFSET		0x6008
+#define	CMD_AES_MASK_OFFSET		0x6010
+#define	CMD_CLK_GATE_CTL_OFFSET		0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR		0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN			0x1
+#define CMD_Q_SIZE			0x1F
+#define CMD_Q_SHIFT			3
+#define COMMANDS_PER_QUEUE		2048
+
+#define QUEUE_SIZE_VAL                  ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+					 CMD_Q_SIZE)
+#define Q_DESC_SIZE                     sizeof(struct ccp_desc)
+#define Q_SIZE(n)                       (COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION                  0x1
+#define INT_ERROR                       0x2
+#define INT_QUEUE_STOPPED               0x4
+#define ALL_INTERRUPTS                  (INT_COMPLETION| \
+					 INT_ERROR| \
+					 INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH                5
+#define MAX_LSB_CNT                     8
+
+#define LSB_SIZE                        16
+#define LSB_ITEM_SIZE                   32
+#define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
+
+/* bitmap */
+enum {
+	BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b)  ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+	CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+	(~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+	(~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+				     uint32_t value)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+	ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+	ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+	CCP_VERSION_5A = 0,
+	CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+	struct ccp_device *dev;
+	char memz_name[RTE_MEMZONE_NAMESIZE];
+
+	rte_atomic64_t free_slots;
+	/**< available free slots updated from enq/deq calls */
+
+	/* Queue identifier */
+	uint64_t id;	/**< queue id */
+	uint64_t qidx;	/**< queue index */
+	uint64_t qsize;	/**< queue size */
+
+	/* Queue address */
+	struct ccp_desc *qbase_desc;
+	void *qbase_addr;
+	phys_addr_t qbase_phys_addr;
+	/**< queue-page registers addr */
+	void *reg_base;
+
+	uint32_t qcontrol;
+	/**< queue ctrl reg */
+
+	int lsb;
+	/**< lsb region assigned to queue */
+	unsigned long lsbmask;
+	/**< lsb regions queue can access */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+	/**< all lsb resources which queue is using */
+	uint32_t sb_key;
+	/**< lsb assigned for queue */
+	uint32_t sb_iv;
+	/**< lsb assigned for iv */
+	uint32_t sb_sha;
+	/**< lsb assigned for sha ctx */
+	uint32_t sb_hmac;
+	/**< lsb assigned for hmac ctx */
+} __rte_cache_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+	TAILQ_ENTRY(ccp_device) next;
+	int id;
+	/**< ccp dev id on platform */
+	struct ccp_queue cmd_q[MAX_HW_QUEUES];
+	/**< ccp queue */
+	int cmd_q_count;
+	/**< no. of ccp Queues */
+	struct rte_pci_device pci;
+	/**< ccp pci identifier */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+	/**< shared lsb mask of ccp */
+	rte_spinlock_t lsb_lock;
+	/**< protection for shared lsb region allocation */
+	int qidx;
+	/**< current queue index */
+} __rte_cache_aligned;
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+	uint32_t soc:1;
+	uint32_t ioc:1;
+	uint32_t rsvd1:1;
+	uint32_t init:1;
+	uint32_t eom:1;
+	uint32_t function:15;
+	uint32_t engine:4;
+	uint32_t prot:1;
+	uint32_t rsvd2:7;
+};
+
+struct dword3 {
+	uint32_t src_hi:16;
+	uint32_t src_mem:2;
+	uint32_t lsb_cxt_id:8;
+	uint32_t rsvd1:5;
+	uint32_t fixed:1;
+};
+
+union dword4 {
+	uint32_t dst_lo;	/* NON-SHA */
+	uint32_t sha_len_lo;	/* SHA */
+};
+
+union dword5 {
+	struct {
+		uint32_t dst_hi:16;
+		uint32_t dst_mem:2;
+		uint32_t rsvd1:13;
+		uint32_t fixed:1;
+	}
+	fields;
+	uint32_t sha_len_hi;
+};
+
+struct dword7 {
+	uint32_t key_hi:16;
+	uint32_t key_mem:2;
+	uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+	struct dword0 dw0;
+	uint32_t length;
+	uint32_t src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	uint32_t key_lo;
+	struct dword7 dw7;
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+	return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+	return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+#endif /* _CCP_DEV_H_ */
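
As an aside (not part of the patch): the descriptor comment above counts on
each bitfield group packing into one 32-bit word, e.g. dword0 is
1+1+1+1+1+15+4+1+7 = 32 bits, so struct ccp_desc should come out to exactly
eight 32-bit words. Bitfield layout is implementation-defined, so a port
could guard that expectation with a compile-time check such as:

	#include <stdint.h>

	/* sketch: with the struct definitions from ccp_dev.h in scope */
	_Static_assert(sizeof(struct dword0) == sizeof(uint32_t),
		       "dword0 must pack into a single 32-bit word");
	_Static_assert(sizeof(struct ccp_desc) == 8 * sizeof(uint32_t),
		       "ccp_desc must be exactly eight 32-bit words");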
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 0000000..59152ca
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,236 @@
+/*   SPDX-License-Identifier: BSD-3-Clause
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+	"igb_uio",
+	"uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+	FILE *fp;
+	int i;
+	char buf[BUFSIZ];
+
+	fp = fopen(PROC_MODULES, "r");
+	if (fp == NULL)
+		return -1;
+	i = 0;
+	while (uio_module_names[i] != NULL) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+				     strlen(uio_module_names[i]))) {
+				fclose(fp);
+				return i;
+			}
+		}
+		i++;
+		rewind(fp);
+	}
+	fclose(fp);
+	printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
+	return -1; /* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			  uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+	/* first split on ':' */
+	union splitaddr {
+		struct {
+			char *domain;
+			char *bus;
+			char *devid;
+			char *function;
+		};
+		char *str[PCI_FMT_NVAL];
+		/* last element-separator is "." not ":" */
+	} splitaddr;
+
+	char *buf_copy = strndup(buf, bufsize);
+
+	if (buf_copy == NULL)
+		return -1;
+
+	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+			!= PCI_FMT_NVAL - 1)
+		goto error;
+	/* final split is on '.' between devid and function */
+	splitaddr.function = strchr(splitaddr.devid, '.');
+	if (splitaddr.function == NULL)
+		goto error;
+	*splitaddr.function++ = '\0';
+
+	/* now convert to int values */
+	errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+	*bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+	*devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+	*function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+	if (errno != 0)
+		goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+	return 0;
+error:
+	free(buf_copy);
+	return -1;
+}
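+
+/*
+ * Example (illustrative BDF): for buf = "0000:03:00.2" the splits
+ * above yield domain = 0x0000, bus = 0x03, devid = 0x00 and
+ * function = 2.
+ */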
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+	FILE *f;
+	char buf[BUFSIZ];
+	char *end = NULL;
+
+	f = fopen(filename, "r");
+	if (f == NULL)
+		return -1;
+	if (fgets(buf, sizeof(buf), f) == NULL) {
+		fclose(f);
+		return -1;
+	}
+	*val = strtoul(buf, &end, 0);
+	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+		fclose(f);
+		return -1;
+	}
+	fclose(f);
+	return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO         0x00000100
+#define IORESOURCE_MEM        0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+				 uint64_t *end_addr, uint64_t *flags)
+{
+	union pci_resource_info {
+		struct {
+			char *phys_addr;
+			char *end_addr;
+			char *flags;
+		};
+		char *ptrs[PCI_RESOURCE_FMT_NVAL];
+	} res_info;
+
+	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+		return -1;
+	errno = 0;
+	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+	*end_addr = strtoull(res_info.end_addr, NULL, 16);
+	*flags = strtoull(res_info.flags, NULL, 16);
+	if (errno != 0)
+		return -1;
+
+	return 0;
+}
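+
+/*
+ * A "resource" line consists of three hex fields, e.g. (values
+ * illustrative only):
+ *
+ *	0x00000000f7300000 0x00000000f73fffff 0x0000000000040200
+ *
+ * i.e. physical start address, physical end address and flags
+ * (here IORESOURCE_MEM is set).
+ */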
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+	FILE *fp;
+	char buf[BUFSIZ];
+	int i;
+	uint64_t phys_addr, end_addr, flags;
+
+	fp = fopen(filename, "r");
+	if (fp == NULL)
+		return -1;
+
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+		if (fgets(buf, sizeof(buf), fp) == NULL)
+			goto error;
+		if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+				&phys_addr, &end_addr, &flags) < 0)
+			goto error;
+
+		if (flags & IORESOURCE_MEM) {
+			dev->mem_resource[i].phys_addr = phys_addr;
+			dev->mem_resource[i].len = end_addr - phys_addr + 1;
+			/* not mapped for now */
+			dev->mem_resource[i].addr = NULL;
+		}
+	}
+	fclose(fp);
+	return 0;
+
+error:
+	fclose(fp);
+	return -1;
+}
+
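+/*
+ * Return the uio device number bound to the PCI device whose sysfs
+ * directory is 'dirname' (e.g. /sys/bus/pci/devices/0000:03:00.2,
+ * path illustrative), or -1 if none is found.
+ */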
+int
+ccp_find_uio_devname(const char *dirname)
+{
+	DIR *dir;
+	struct dirent *e;
+	char dirname_uio[PATH_MAX];
+	unsigned int uio_num;
+	int ret = -1;
+
+	/* depending on kernel version, uio can be located in uio/uioX
+	 * or uio:uioX
+	 */
+	snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+	dir = opendir(dirname_uio);
+	if (dir == NULL) {
+		/* retry with the parent directory; layout differs by kernel version */
+		dir = opendir(dirname);
+		if (dir == NULL)
+			return -1;
+	}
+
+	/* take the first file starting with "uio" */
+	while ((e = readdir(dir)) != NULL) {
+		/* format could be uio%d ...*/
+		int shortprefix_len = sizeof("uio") - 1;
+		/* ... or uio:uio%d */
+		int longprefix_len = sizeof("uio:uio") - 1;
+		char *endptr;
+
+		if (strncmp(e->d_name, "uio", 3) != 0)
+			continue;
+
+		/* first try uio%d */
+		errno = 0;
+		uio_num = strtoul(e->d_name + shortprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+
+		/* then try uio:uio%d */
+		errno = 0;
+		uio_num = strtoul(e->d_name + longprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+	}
+	closedir(dir);
+	return ret;
+}
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 0000000..7ed3bac
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,27 @@
+/*   SPDX-License-Identifier: BSD-3-Clause
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			      uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+				 struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+#endif /* _CCP_PCI_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 0000000..099eb21
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,29 @@
+/*   SPDX-License-Identifier: BSD-3-Clause
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+struct rte_cryptodev_ops ccp_ops = {
+		.dev_configure		= NULL,
+		.dev_start		= NULL,
+		.dev_stop		= NULL,
+		.dev_close		= NULL,
+
+		.stats_get		= NULL,
+		.stats_reset		= NULL,
+
+		.dev_infos_get		= NULL,
+
+		.queue_pair_setup	= NULL,
+		.queue_pair_release	= NULL,
+		.queue_pair_start	= NULL,
+		.queue_pair_stop	= NULL,
+		.queue_pair_count	= NULL,
+
+		.session_get_size	= NULL,
+		.session_configure	= NULL,
+		.session_clear		= NULL,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 0000000..0da9b92
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,56 @@
+/*   SPDX-License-Identifier: BSD-3-Clause
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev.h>
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+			__func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/**< Maximum queue pairs supported by CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS	1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
+	unsigned int max_nb_sessions;	/**< Max number of sessions */
+	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+};
+
+/**< device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 71e7023..ce64dbc 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -2,23 +2,170 @@
  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
  */
 
+#include <rte_bus_pci.h>
 #include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
 
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global flag indicating whether the CCP PMD has already been initialized.
+ */
+static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
+		      struct rte_crypto_op **ops __rte_unused,
+		      uint16_t nb_ops __rte_unused)
+{
+	uint16_t enq_cnt = 0;
+
+	return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
+		      struct rte_crypto_op **ops __rte_unused,
+		      uint16_t nb_ops __rte_unused)
+{
+	uint16_t nb_dequeued = 0;
+
+	return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+	},
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+	},
+	{.device_id = 0},
+};
+
 /** Remove ccp pmd */
 static int
-cryptodev_ccp_remove(struct rte_vdev_device *dev __rte_unused)
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
 {
+	const char *name;
+
+	ccp_pmd_init_done = 0;
+	name = rte_vdev_device_name(dev);
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+			name, rte_socket_id());
+
 	return 0;
 }
 
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+		     struct rte_vdev_device *vdev,
+		     struct rte_cryptodev_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct ccp_private *internals;
+	uint8_t cryptodev_cnt = 0;
+
+	if (init_params->name[0] == '\0')
+		snprintf(init_params->name, sizeof(init_params->name),
+				"%s", name);
+
+	dev = rte_cryptodev_pmd_create(init_params->name,
+				       &vdev->device,
+				       init_params);
+	if (dev == NULL) {
+		CCP_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+	if (cryptodev_cnt == 0) {
+		CCP_LOG_ERR("failed to detect CCP crypto device");
+		goto init_error;
+	}
+
+	printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+	dev->driver_id = ccp_cryptodev_driver_id;
+
+	/* register rx/tx burst functions for data path */
+	dev->dev_ops = ccp_pmd_ops;
+	dev->enqueue_burst = ccp_pmd_enqueue_burst;
+	dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+	internals = dev->data->dev_private;
+
+	internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+	internals->crypto_num_dev = cryptodev_cnt;
+
+	return 0;
+
+init_error:
+	CCP_LOG_ERR("driver %s: %s() failed",
+		    init_params->name, __func__);
+	cryptodev_ccp_remove(vdev);
+
+	return -EFAULT;
+}
+
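+/*
+ * Usage sketch (assumed EAL invocation): the PMD is instantiated as
+ * a virtual device, e.g. "--vdev crypto_ccp", which reaches
+ * cryptodev_ccp_probe() below and, on first probe, also
+ * cryptodev_ccp_create() above to scan the PCI bus for the CCP ids
+ * listed in ccp_pci_id.
+ */
+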
 /** Probe ccp pmd */
 static int
-cryptodev_ccp_probe(struct rte_vdev_device *vdev __rte_unused)
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
 {
+	int rc = 0;
+	const char *name;
+	struct rte_cryptodev_pmd_init_params init_params = {
+		"",
+		sizeof(struct ccp_private),
+		rte_socket_id(),
+		CCP_PMD_MAX_QUEUE_PAIRS,
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+	};
+	const char *input_args;
+
+	if (ccp_pmd_init_done) {
+		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+		return -EFAULT;
+	}
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	input_args = rte_vdev_device_args(vdev);
+	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+	init_params.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	rc = cryptodev_ccp_create(name, vdev, &init_params);
+	if (rc)
+		return rc;
+	ccp_pmd_init_done = 1;
 	return 0;
 }
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v5 03/19] crypto/ccp: support basic pmd ops
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 02/19] crypto/ccp: support ccp device initialization and deintialization Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 04/19] crypto/ccp: support session related crypto " Ravi Kumar
                           ` (17 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_dev.c         |  9 ++++++
 drivers/crypto/ccp/ccp_dev.h         |  9 ++++++
 drivers/crypto/ccp/ccp_pmd_ops.c     | 61 +++++++++++++++++++++++++++++++++---
 drivers/crypto/ccp/ccp_pmd_private.h | 43 +++++++++++++++++++++++++
 4 files changed, 117 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 42cff75..546f0a7 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -26,6 +26,15 @@
 struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
 static int ccp_dev_id;
 
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+	struct ccp_private *priv = dev->data->dev_private;
+
+	priv->last_dev = TAILQ_FIRST(&ccp_list);
+	return 0;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index f594e91..c360fe0 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -78,6 +78,10 @@
 #define LSB_ITEM_SIZE                   32
 #define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
 
+/* General CCP Defines */
+
+#define CCP_SB_BYTES                    32
+
 /* bitmap */
 enum {
 	BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
@@ -273,6 +277,11 @@ high32_value(unsigned long addr)
 	return ((uint64_t)addr >> 32) & 0x00000ffff;
 }
 
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
 /**
  * Detect ccp platform and initialize all ccp devices
  *
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 099eb21..99b8ca5 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -2,18 +2,69 @@
  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
  */
 
+#include <string.h>
+
+#include <rte_common.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+
+static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+	       struct rte_cryptodev_config *config __rte_unused)
+{
+	return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+	return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+	return 0;
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+		 struct rte_cryptodev_info *dev_info)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = ccp_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
 
 struct rte_cryptodev_ops ccp_ops = {
-		.dev_configure		= NULL,
-		.dev_start		= NULL,
-		.dev_stop		= NULL,
-		.dev_close		= NULL,
+		.dev_configure		= ccp_pmd_config,
+		.dev_start		= ccp_pmd_start,
+		.dev_stop		= ccp_pmd_stop,
+		.dev_close		= ccp_pmd_close,
 
 		.stats_get		= NULL,
 		.stats_reset		= NULL,
 
-		.dev_infos_get		= NULL,
+		.dev_infos_get		= ccp_pmd_info_get,
 
 		.queue_pair_setup	= NULL,
 		.queue_pair_release	= NULL,
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
index 0da9b92..47c4fb2 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -34,13 +34,56 @@
 #define CCP_NB_MAX_DESCRIPTORS 1024
 #define CCP_MAX_BURST 64
 
+#include "ccp_dev.h"
+
 /* private data structure for each CCP crypto device */
 struct ccp_private {
 	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
 	unsigned int max_nb_sessions;	/**< Max number of sessions */
 	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+	struct ccp_device *last_dev;	/**< Last working crypto device */
 };
 
+/* CCP batch info */
+struct ccp_batch_info {
+	struct rte_crypto_op *op[CCP_MAX_BURST];
+	/**< op table populated at enqueue time from the app */
+	int op_idx;
+	struct ccp_queue *cmd_q;
+	uint16_t opcnt;
+	/**< no. of crypto ops in batch */
+	int desccnt;
+	/**< no. of ccp queue descriptors */
+	uint32_t head_offset;
+	/**< ccp queue head offset at time of enqueue */
+	uint32_t tail_offset;
+	/**< ccp queue tail offset at time of enqueue */
+	uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+	phys_addr_t lsb_buf_phys;
+	/**< LSB intermediate buf for passthru */
+	int lsb_buf_idx;
+} __rte_cache_aligned;
+
+/**< CCP crypto queue pair */
+struct ccp_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	/**< Unique Queue Pair Name */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing process packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_mempool *batch_mp;
+	/**< Session Mempool for batch info */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+	struct ccp_batch_info *b_info;
+	/**< Store ops pulled out of queue */
+	struct rte_cryptodev *dev;
+	/**< rte crypto device to which this qp belongs */
+} __rte_cache_aligned;
+
+
 /**< device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *ccp_pmd_ops;
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v5 04/19] crypto/ccp: support session related crypto pmd ops
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 02/19] crypto/ccp: support ccp device initialization and deintialization Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 03/19] crypto/ccp: support basic pmd ops Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 05/19] crypto/ccp: support queue pair related " Ravi Kumar
                           ` (16 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/Makefile      |   3 +-
 drivers/crypto/ccp/ccp_crypto.c  | 203 +++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  | 241 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h     | 129 +++++++++++++++++++++
 drivers/crypto/ccp/ccp_pmd_ops.c |  61 +++++++++-
 5 files changed, 633 insertions(+), 4 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp_crypto.c
 create mode 100644 drivers/crypto/ccp/ccp_crypto.h

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index f121712..3f8330d 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -25,8 +25,9 @@ EXPORT_MAP := rte_pmd_ccp_version.map
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 0000000..8bf4ce1
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,203 @@
+/*   SPDX-License-Identifier: BSD-3-Clause
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+	enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+	if (xform == NULL)
+		return res;
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->next == NULL)
+			return CCP_CMD_AUTH;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return CCP_CMD_HASH_CIPHER;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next == NULL)
+			return CCP_CMD_CIPHER;
+		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return CCP_CMD_CIPHER_HASH;
+	}
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+		return CCP_CMD_COMBINED;
+	return res;
+}
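+
+/*
+ * Examples of the mapping implemented above:
+ *
+ *	CIPHER only		-> CCP_CMD_CIPHER
+ *	AUTH only		-> CCP_CMD_AUTH
+ *	CIPHER then AUTH	-> CCP_CMD_CIPHER_HASH
+ *	AUTH then CIPHER	-> CCP_CMD_HASH_CIPHER
+ *	AEAD			-> CCP_CMD_COMBINED
+ */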
+
+/* configure session */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+			     const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+	cipher_xform = &xform->cipher;
+
+	/* set cipher direction */
+	if (cipher_xform->op ==  RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+	else
+		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+	/* set cipher key */
+	sess->cipher.key_length = cipher_xform->key.length;
+	rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+		   cipher_xform->key.length);
+
+	/* set iv parameters */
+	sess->iv.offset = cipher_xform->iv.offset;
+	sess->iv.length = cipher_xform->iv.length;
+
+	switch (cipher_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo");
+		return -1;
+	}
+
+	switch (sess->cipher.engine) {
+	default:
+		CCP_LOG_ERR("Invalid CCP Engine");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_auth_xform *auth_xform = NULL;
+
+	auth_xform = &xform->auth;
+
+	sess->auth.digest_length = auth_xform->digest_length;
+	if (auth_xform->op ==  RTE_CRYPTO_AUTH_OP_GENERATE)
+		sess->auth.op = CCP_AUTH_OP_GENERATE;
+	else
+		sess->auth.op = CCP_AUTH_OP_VERIFY;
+	switch (auth_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported hash algo");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_aead_xform *aead_xform = NULL;
+
+	aead_xform = &xform->aead;
+
+	sess->cipher.key_length = aead_xform->key.length;
+	rte_memcpy(sess->cipher.key, aead_xform->key.data,
+		   aead_xform->key.length);
+
+	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+		sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+		sess->auth.op = CCP_AUTH_OP_GENERATE;
+	} else {
+		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+		sess->auth.op = CCP_AUTH_OP_VERIFY;
+	}
+	sess->auth.aad_length = aead_xform->aad_length;
+	sess->auth.digest_length = aead_xform->digest_length;
+
+	/* set iv parameters */
+	sess->iv.offset = aead_xform->iv.offset;
+	sess->iv.length = aead_xform->iv.length;
+
+	switch (aead_xform->algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+			   const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *aead_xform = NULL;
+	int ret = 0;
+
+	sess->cmd_id = ccp_get_cmd_id(xform);
+
+	switch (sess->cmd_id) {
+	case CCP_CMD_CIPHER:
+		cipher_xform = xform;
+		break;
+	case CCP_CMD_AUTH:
+		auth_xform = xform;
+		break;
+	case CCP_CMD_CIPHER_HASH:
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case CCP_CMD_HASH_CIPHER:
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	case CCP_CMD_COMBINED:
+		aead_xform = xform;
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+		return -1;
+	}
+
+	/* Default IV length = 0 */
+	sess->iv.length = 0;
+	if (cipher_xform) {
+		ret = ccp_configure_session_cipher(sess, cipher_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+			return ret;
+		}
+	}
+	if (auth_xform) {
+		ret = ccp_configure_session_auth(sess, auth_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported auth parameters");
+			return ret;
+		}
+	}
+	if (aead_xform) {
+		ret = ccp_configure_session_aead(sess, aead_xform);
+		if (ret != 0) {
+			CCP_LOG_ERR("Invalid/unsupported aead parameters");
+			return ret;
+		}
+	}
+	return ret;
+}
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 0000000..8d9de07
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,241 @@
+/*   SPDX-License-Identifier: BSD-3-Clause
+ *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include "ccp_dev.h"
+
+#define CCP_SHA3_CTX_SIZE 200
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+	CCP_AES_MODE_ECB = 0,
+	CCP_AES_MODE_CBC,
+	CCP_AES_MODE_OFB,
+	CCP_AES_MODE_CFB,
+	CCP_AES_MODE_CTR,
+	CCP_AES_MODE_CMAC,
+	CCP_AES_MODE_GHASH,
+	CCP_AES_MODE_GCTR,
+	CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+	CCP_AES_MODE_GHASH_AAD = 0,
+	CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+	CCP_AES_TYPE_128 = 0,
+	CCP_AES_TYPE_192,
+	CCP_AES_TYPE_256,
+	CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+	CCP_DES_MODE_ECB = 0, /* Not supported */
+	CCP_DES_MODE_CBC,
+	CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+	CCP_DES_TYPE_128 = 0,	/* 112 + 16 parity */
+	CCP_DES_TYPE_192,	/* 168 + 24 parity */
+	CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ */
+enum ccp_sha_type {
+	CCP_SHA_TYPE_1 = 1,
+	CCP_SHA_TYPE_224,
+	CCP_SHA_TYPE_256,
+	CCP_SHA_TYPE_384,
+	CCP_SHA_TYPE_512,
+	CCP_SHA_TYPE_RSVD1,
+	CCP_SHA_TYPE_RSVD2,
+	CCP_SHA3_TYPE_224,
+	CCP_SHA3_TYPE_256,
+	CCP_SHA3_TYPE_384,
+	CCP_SHA3_TYPE_512,
+	CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+	CCP_CIPHER_ALGO_AES_CBC = 0,
+	CCP_CIPHER_ALGO_AES_ECB,
+	CCP_CIPHER_ALGO_AES_CTR,
+	CCP_CIPHER_ALGO_AES_GCM,
+	CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+	CCP_CIPHER_DIR_DECRYPT = 0,
+	CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+	CCP_AUTH_ALGO_SHA1 = 0,
+	CCP_AUTH_ALGO_SHA1_HMAC,
+	CCP_AUTH_ALGO_SHA224,
+	CCP_AUTH_ALGO_SHA224_HMAC,
+	CCP_AUTH_ALGO_SHA3_224,
+	CCP_AUTH_ALGO_SHA3_224_HMAC,
+	CCP_AUTH_ALGO_SHA256,
+	CCP_AUTH_ALGO_SHA256_HMAC,
+	CCP_AUTH_ALGO_SHA3_256,
+	CCP_AUTH_ALGO_SHA3_256_HMAC,
+	CCP_AUTH_ALGO_SHA384,
+	CCP_AUTH_ALGO_SHA384_HMAC,
+	CCP_AUTH_ALGO_SHA3_384,
+	CCP_AUTH_ALGO_SHA3_384_HMAC,
+	CCP_AUTH_ALGO_SHA512,
+	CCP_AUTH_ALGO_SHA512_HMAC,
+	CCP_AUTH_ALGO_SHA3_512,
+	CCP_AUTH_ALGO_SHA3_512_HMAC,
+	CCP_AUTH_ALGO_AES_CMAC,
+	CCP_AUTH_ALGO_AES_GCM,
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	CCP_AUTH_ALGO_MD5_HMAC,
+#endif
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+	CCP_AUTH_OP_GENERATE = 0,
+	CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+	enum ccp_cmd_order cmd_id;
+	/**< chain order mode */
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	/**< IV parameters */
+	struct {
+		enum ccp_cipher_algo  algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+			enum ccp_des_mode des_mode;
+		} um;
+		union {
+			enum ccp_aes_type aes_type;
+			enum ccp_des_type des_type;
+		} ut;
+		enum ccp_cipher_dir dir;
+		uint64_t key_length;
+		/**< max cipher key size 256 bits */
+		uint8_t key[32];
+		/**< key in CCP format */
+		uint8_t key_ccp[32];
+		phys_addr_t key_phys;
+		/**< AES-CTR: nonce(4) iv(8) ctr */
+		uint8_t nonce[32];
+		phys_addr_t nonce_phys;
+	} cipher;
+	/**< Cipher Parameters */
+
+	struct {
+		enum ccp_hash_algo algo;
+		enum ccp_engine  engine;
+		union {
+			enum ccp_aes_mode aes_mode;
+		} um;
+		union {
+			enum ccp_sha_type sha_type;
+			enum ccp_aes_type aes_type;
+		} ut;
+		enum ccp_hash_op op;
+		uint64_t key_length;
+		/**< max hash key size 144 bytes (struct capabilities) */
+		uint8_t key[144];
+		/**< max big-endian AES key size is 32 bytes */
+		uint8_t key_ccp[32];
+		phys_addr_t key_phys;
+		uint64_t digest_length;
+		void *ctx;
+		int ctx_len;
+		int offset;
+		int block_size;
+		/**< Buffer to store software generated precompute values */
+		/**< For HMAC: H(ipad ^ key) and H(opad ^ key) */
+		/**< For CMAC: K1 IV and K2 IV */
+		uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+		/**< SHA3 initial ctx, all zeros */
+		uint8_t sha3_ctx[200];
+		int aad_length;
+	} auth;
+	/**< Authentication Parameters */
+	enum rte_crypto_aead_algorithm aead_algo;
+	/**< AEAD Algorithm */
+
+	uint32_t reserved;
+} __rte_cache_aligned;
+
+extern uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_qp;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @return 0 on success otherwise -1
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+			       const struct rte_crypto_sym_xform *xform);
+
+#endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index c360fe0..d88f70c 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -199,6 +199,123 @@ struct ccp_device {
 	/**< current queue index */
 } __rte_cache_aligned;
 
+/**< CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+	CCP_ENGINE_AES = 0,
+	CCP_ENGINE_XTS_AES_128,
+	CCP_ENGINE_3DES,
+	CCP_ENGINE_SHA,
+	CCP_ENGINE_RSA,
+	CCP_ENGINE_PASSTHRU,
+	CCP_ENGINE_ZLIB_DECOMPRESS,
+	CCP_ENGINE_ECC,
+	CCP_ENGINE__LAST,
+};
+
+/* Passthru engine */
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+	CCP_PASSTHRU_BITWISE_NOOP = 0,
+	CCP_PASSTHRU_BITWISE_AND,
+	CCP_PASSTHRU_BITWISE_OR,
+	CCP_PASSTHRU_BITWISE_XOR,
+	CCP_PASSTHRU_BITWISE_MASK,
+	CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+	CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+	CCP_PASSTHRU_BYTESWAP_32BIT,
+	CCP_PASSTHRU_BYTESWAP_256BIT,
+	CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+	phys_addr_t src_addr;
+	phys_addr_t dest_addr;
+	enum ccp_passthru_bitwise bit_mod;
+	enum ccp_passthru_byteswap byte_swap;
+	int len;
+	int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} aes;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t mode:5;
+		uint16_t type:2;
+	} des;
+	struct {
+		uint16_t size:7;
+		uint16_t encrypt:1;
+		uint16_t rsvd:5;
+		uint16_t type:2;
+	} aes_xts;
+	struct {
+		uint16_t rsvd1:10;
+		uint16_t type:4;
+		uint16_t rsvd2:1;
+	} sha;
+	struct {
+		uint16_t mode:3;
+		uint16_t size:12;
+	} rsa;
+	struct {
+		uint16_t byteswap:2;
+		uint16_t bitwise:3;
+		uint16_t reflect:2;
+		uint16_t rsvd:8;
+	} pt;
+	struct  {
+		uint16_t rsvd:13;
+	} zlib;
+	struct {
+		uint16_t size:10;
+		uint16_t type:2;
+		uint16_t mode:3;
+	} ecc;
+	uint16_t raw;
+};
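+
+/*
+ * Usage sketch: encoding the per-engine function field that is placed
+ * in descriptor word 0. The enum values come from ccp_crypto.h;
+ * whether they match the hardware encoding directly is an assumption
+ * of this sketch:
+ *
+ *	union ccp_function fn;
+ *
+ *	fn.raw = 0;
+ *	fn.aes.encrypt = CCP_CIPHER_DIR_ENCRYPT;
+ *	fn.aes.mode = CCP_AES_MODE_CBC;
+ *	fn.aes.type = CCP_AES_TYPE_128;
+ *	desc.dw0.function = fn.raw;
+ */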
+
+
 /**
 * descriptor for version 5 CCP commands
  * 8 32-bit words:
@@ -265,6 +382,18 @@ struct ccp_desc {
 	struct dword7 dw7;
 };
 
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+	CCP_CMD_CIPHER = 0,
+	CCP_CMD_AUTH,
+	CCP_CMD_CIPHER_HASH,
+	CCP_CMD_HASH_CIPHER,
+	CCP_CMD_COMBINED,
+	CCP_CMD_NOT_SUPPORTED,
+};
+
 static inline uint32_t
 low32_value(unsigned long addr)
 {
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 99b8ca5..0560f68 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -10,6 +10,7 @@
 
 #include "ccp_pmd_private.h"
 #include "ccp_dev.h"
+#include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
@@ -55,6 +56,60 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
 	}
 }
 
+static unsigned
+ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct ccp_session);
+}
+
+static int
+ccp_pmd_session_configure(struct rte_cryptodev *dev,
+			  struct rte_crypto_sym_xform *xform,
+			  struct rte_cryptodev_sym_session *sess,
+			  struct rte_mempool *mempool)
+{
+	int ret;
+	void *sess_private_data;
+
+	if (unlikely(sess == NULL || xform == NULL)) {
+		CCP_LOG_ERR("Invalid session struct or xform");
+		return -ENOMEM;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CCP_LOG_ERR("Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+	ret = ccp_set_session_parameters(sess_private_data, xform);
+	if (ret != 0) {
+		CCP_LOG_ERR("failed configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+	set_session_private_data(sess, dev->driver_id,
+				 sess_private_data);
+
+	return 0;
+}
+
+static void
+ccp_pmd_session_clear(struct rte_cryptodev *dev,
+		      struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_session_private_data(sess, index);
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		rte_mempool_put(sess_mp, sess_priv);
+		memset(sess_priv, 0, sizeof(struct ccp_session));
+		set_session_private_data(sess, index, NULL);
+	}
+}
+
 struct rte_cryptodev_ops ccp_ops = {
 		.dev_configure		= ccp_pmd_config,
 		.dev_start		= ccp_pmd_start,
@@ -72,9 +127,9 @@ struct rte_cryptodev_ops ccp_ops = {
 		.queue_pair_stop	= NULL,
 		.queue_pair_count	= NULL,
 
-		.session_get_size	= NULL,
-		.session_configure	= NULL,
-		.session_clear		= NULL,
+		.session_get_size	= ccp_pmd_session_get_size,
+		.session_configure	= ccp_pmd_session_configure,
+		.session_clear		= ccp_pmd_session_clear,
 };
 
 struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v5 05/19] crypto/ccp: support queue pair related pmd ops
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (2 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 04/19] crypto/ccp: support session related crypto " Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 06/19] crypto/ccp: support crypto enqueue and dequeue burst api Ravi Kumar
                           ` (15 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_pmd_ops.c | 149 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 144 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 0560f68..bd0aea4 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -56,6 +56,145 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
 	}
 }
 
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct ccp_qp *qp;
+
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+		rte_ring_free(qp->processed_pkts);
+		rte_mempool_free(qp->batch_mp);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct ccp_qp *qp)
+{
+	unsigned int n = snprintf(qp->name, sizeof(qp->name),
+			"ccp_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
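+/*
+ * Look up the batch-info ring by the qp's unique name and reuse it if
+ * it is already present and large enough; otherwise create a fresh
+ * single-producer/single-consumer ring. Reuse keeps repeated qp setup
+ * (e.g. across an application restart) from failing on a stale ring
+ * name (rationale assumed from the lookup-before-create pattern
+ * below).
+ */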
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+				  unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->size >= ring_size) {
+			CCP_LOG_INFO(
+				"Reusing ring %s for processed packets",
+				qp->name);
+			return r;
+		}
+		CCP_LOG_INFO(
+			"Unable to reuse ring %s for processed packets",
+			 qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		 const struct rte_cryptodev_qp_conf *qp_conf,
+		 int socket_id, struct rte_mempool *session_pool)
+{
+	struct ccp_private *internals = dev->data->dev_private;
+	struct ccp_qp *qp;
+	int retval = 0;
+
+	if (qp_id >= internals->max_nb_qpairs) {
+		CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+			    qp_id, internals->max_nb_qpairs);
+		return (-EINVAL);
+	}
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		ccp_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+					RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL) {
+		CCP_LOG_ERR("Failed to allocate queue pair memory");
+		return (-ENOMEM);
+	}
+
+	qp->dev = dev;
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	retval = ccp_pmd_qp_set_unique_name(dev, qp);
+	if (retval) {
+		CCP_LOG_ERR("Failed to create unique name for ccp qp");
+		goto qp_setup_cleanup;
+	}
+
+	qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL) {
+		CCP_LOG_ERR("Failed to create batch info ring");
+		goto qp_setup_cleanup;
+	}
+
+	qp->sess_mp = session_pool;
+
+	/* mempool for batch info */
+	qp->batch_mp = rte_mempool_create(
+				qp->name,
+				qp_conf->nb_descriptors,
+				sizeof(struct ccp_batch_info),
+				RTE_CACHE_LINE_SIZE,
+				0, NULL, NULL, NULL, NULL,
+				SOCKET_ID_ANY, 0);
+	if (qp->batch_mp == NULL)
+		goto qp_setup_cleanup;
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	return 0;
+
+qp_setup_cleanup:
+	dev->data->queue_pairs[qp_id] = NULL;
+	if (qp)
+		rte_free(qp);
+	return -1;
+}
+
+static int
+ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
+		 uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static int
+ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
+		uint16_t queue_pair_id __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
 static unsigned
 ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
 {
@@ -121,11 +260,11 @@ struct rte_cryptodev_ops ccp_ops = {
 
 		.dev_infos_get		= ccp_pmd_info_get,
 
-		.queue_pair_setup	= NULL,
-		.queue_pair_release	= NULL,
-		.queue_pair_start	= NULL,
-		.queue_pair_stop	= NULL,
-		.queue_pair_count	= NULL,
+		.queue_pair_setup	= ccp_pmd_qp_setup,
+		.queue_pair_release	= ccp_pmd_qp_release,
+		.queue_pair_start	= ccp_pmd_qp_start,
+		.queue_pair_stop	= ccp_pmd_qp_stop,
+		.queue_pair_count	= ccp_pmd_qp_count,
 
 		.session_get_size	= ccp_pmd_session_get_size,
 		.session_configure	= ccp_pmd_session_configure,
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v5 06/19] crypto/ccp: support crypto enqueue and dequeue burst api
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (3 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 05/19] crypto/ccp: support queue pair related " Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 07/19] crypto/ccp: support sessionless operations Ravi Kumar
                           ` (14 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 360 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  35 ++++
 drivers/crypto/ccp/ccp_dev.c     |  27 +++
 drivers/crypto/ccp/ccp_dev.h     |   9 +
 drivers/crypto/ccp/rte_ccp_pmd.c |  64 ++++++-
 5 files changed, 488 insertions(+), 7 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 8bf4ce1..7ced435 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -201,3 +201,363 @@ ccp_set_session_parameters(struct ccp_session *sess,
 	}
 	return ret;
 }
+
+/* calculate CCP descriptor requirements */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cipher.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo %d",
+			    session->cipher.algo);
+	}
+	return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->auth.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported auth algo %d",
+			    session->auth.algo);
+	}
+
+	return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->aead_algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo %d",
+			    session->aead_algo);
+	}
+	return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+	int count = 0;
+
+	switch (session->cmd_id) {
+	case CCP_CMD_CIPHER:
+		count = ccp_cipher_slot(session);
+		break;
+	case CCP_CMD_AUTH:
+		count = ccp_auth_slot(session);
+		break;
+	case CCP_CMD_CIPHER_HASH:
+	case CCP_CMD_HASH_CIPHER:
+		count = ccp_cipher_slot(session);
+		count += ccp_auth_slot(session);
+		break;
+	case CCP_CMD_COMBINED:
+		count = ccp_aead_slot(session);
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported cmd_id");
+
+	}
+
+	return count;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+		  struct ccp_queue *cmd_q __rte_unused,
+		  struct ccp_batch_info *b_info __rte_unused)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+
+	switch (session->cipher.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported cipher algo %d",
+			    session->cipher.algo);
+		return -ENOTSUP;
+	}
+	return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q __rte_unused,
+		struct ccp_batch_info *b_info __rte_unused)
+{
+
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	switch (session->auth.algo) {
+	default:
+		CCP_LOG_ERR("Unsupported auth algo %d",
+			    session->auth.algo);
+		return -ENOTSUP;
+	}
+
+	return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q __rte_unused,
+		struct ccp_batch_info *b_info __rte_unused)
+{
+	int result = 0;
+	struct ccp_session *session;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	switch (session->aead_algo) {
+	default:
+		CCP_LOG_ERR("Unsupported aead algo %d",
+			    session->aead_algo);
+		return -ENOTSUP;
+	}
+	return result;
+}
+
+int
+process_ops_to_enqueue(const struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       struct ccp_queue *cmd_q,
+		       uint16_t nb_ops,
+		       int slots_req)
+{
+	int i, result = 0;
+	struct ccp_batch_info *b_info;
+	struct ccp_session *session;
+
+	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+		CCP_LOG_ERR("batch info allocation failed");
+		return 0;
+	}
+	/* populate batch info necessary for dequeue */
+	b_info->op_idx = 0;
+	b_info->lsb_buf_idx = 0;
+	b_info->desccnt = 0;
+	b_info->cmd_q = cmd_q;
+	b_info->lsb_buf_phys =
+		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+	for (i = 0; i < nb_ops; i++) {
+		session = (struct ccp_session *)get_session_private_data(
+						 op[i]->sym->session,
+						 ccp_cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_AUTH:
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_CIPHER_HASH:
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			if (result)
+				break;
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_HASH_CIPHER:
+			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+			if (result)
+				break;
+			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+			break;
+		case CCP_CMD_COMBINED:
+			result = ccp_crypto_aead(op[i], cmd_q, b_info);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+			result = -1;
+		}
+		if (unlikely(result < 0)) {
+			rte_atomic64_add(&b_info->cmd_q->free_slots,
+					 (slots_req - b_info->desccnt));
+			break;
+		}
+		b_info->op[i] = op[i];
+	}
+
+	b_info->opcnt = i;
+	b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+					 Q_DESC_SIZE);
+
+	rte_wmb();
+	/* Write the new tail address back to the queue register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      b_info->tail_offset);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+
+	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+	return i;
+}
+
+static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+	struct ccp_session *session;
+	uint8_t *digest_data, *addr;
+	struct rte_mbuf *m_last;
+	int offset, digest_offset;
+	uint8_t digest_le[64];
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	if (session->cmd_id == CCP_CMD_COMBINED) {
+		digest_data = op->sym->aead.digest.data;
+		digest_offset = op->sym->aead.data.offset +
+					op->sym->aead.data.length;
+	} else {
+		digest_data = op->sym->auth.digest.data;
+		digest_offset = op->sym->auth.data.offset +
+					op->sym->auth.data.length;
+	}
+	m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+	addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+			   m_last->data_len - session->auth.ctx_len);
+
+	rte_mb();
+	offset = session->auth.offset;
+
+	if (session->auth.engine == CCP_ENGINE_SHA)
+		if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+		    (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+			/* All other algorithms require byte
+			 * swap done by host
+			 */
+			unsigned int i;
+
+			offset = session->auth.ctx_len -
+				session->auth.offset - 1;
+			for (i = 0; i < session->auth.digest_length; i++)
+				digest_le[i] = addr[offset - i];
+			offset = 0;
+			addr = digest_le;
+		}
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(addr + offset, digest_data,
+			   session->auth.digest_length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+	} else {
+		if (unlikely(digest_data == NULL))
+			digest_data = rte_pktmbuf_mtod_offset(
+					op->sym->m_dst, uint8_t *,
+					digest_offset);
+		rte_memcpy(digest_data, addr + offset,
+			   session->auth.digest_length);
+	}
+	/* Trim area used for digest from mbuf. */
+	rte_pktmbuf_trim(op->sym->m_src,
+			 session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct rte_crypto_op **op_d,
+		struct ccp_batch_info *b_info,
+		uint16_t nb_ops)
+{
+	int i, min_ops;
+	struct ccp_session *session;
+
+	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+	for (i = 0; i < min_ops; i++) {
+		op_d[i] = b_info->op[b_info->op_idx++];
+		session = (struct ccp_session *)get_session_private_data(
+						 op_d[i]->sym->session,
+						ccp_cryptodev_driver_id);
+		switch (session->cmd_id) {
+		case CCP_CMD_CIPHER:
+			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			break;
+		case CCP_CMD_AUTH:
+		case CCP_CMD_CIPHER_HASH:
+		case CCP_CMD_HASH_CIPHER:
+		case CCP_CMD_COMBINED:
+			ccp_auth_dq_prepare(op_d[i]);
+			break;
+		default:
+			CCP_LOG_ERR("Unsupported cmd_id");
+		}
+	}
+
+	b_info->opcnt -= min_ops;
+	return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+		       struct rte_crypto_op **op,
+		       uint16_t nb_ops)
+{
+	struct ccp_batch_info *b_info;
+	uint32_t cur_head_offset;
+
+	if (qp->b_info != NULL) {
+		b_info = qp->b_info;
+		if (unlikely(b_info->op_idx > 0))
+			goto success;
+	} else if (rte_ring_dequeue(qp->processed_pkts,
+				    (void **)&b_info))
+		return 0;
+	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+				       CMD_Q_HEAD_LO_BASE);
+
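+	/*
+	 * The batch is complete only once the hardware head pointer
+	 * has moved past the tail recorded at enqueue time. The two
+	 * branches below cover wrap-around of the circular descriptor
+	 * queue: if the current head still lies within the span this
+	 * batch occupies, processing is unfinished and the batch is
+	 * stashed on the qp for a later poll.
+	 */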
+	if (b_info->head_offset < b_info->tail_offset) {
+		if ((cur_head_offset >= b_info->head_offset) &&
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	} else {
+		if ((cur_head_offset >= b_info->head_offset) ||
+		    (cur_head_offset < b_info->tail_offset)) {
+			qp->b_info = b_info;
+			return 0;
+		}
+	}
+
+success:
+	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
+	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+	b_info->desccnt = 0;
+	if (b_info->opcnt > 0) {
+		qp->b_info = b_info;
+	} else {
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		qp->b_info = NULL;
+	}
+
+	return nb_ops;
+}
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 8d9de07..c2fce1d 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -238,4 +238,39 @@ struct ccp_qp;
 int ccp_set_session_parameters(struct ccp_session *sess,
 			       const struct rte_crypto_sym_xform *xform);
 
+/**
+ * Compute the no. of CCP descriptor slots required by a session's operation
+ *
+ * @param session CCP private session
+ * @return no. of descriptor slots required
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * process crypto ops to be enqueued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops no. of ops to be submitted
+ * @param slots_req no. of descriptor slots required for the burst
+ * @return no. of ops enqueued, 0 on failure
+ */
+int process_ops_to_enqueue(const struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   struct ccp_queue *cmd_q,
+			   uint16_t nb_ops,
+			   int slots_req);
+
+/**
+ * process crypto ops to be dequeued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops requested no. of ops
+ * @return no. of ops dequeued
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+			   struct rte_crypto_op **op,
+			   uint16_t nb_ops);
+
 #endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 546f0a7..2e702de 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -35,6 +35,33 @@ ccp_dev_start(struct rte_cryptodev *dev)
 	return 0;
 }
 
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+	int i, ret = 0;
+	struct ccp_device *dev;
+	struct ccp_private *priv = cdev->data->dev_private;
+
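+	/*
+	 * Round-robin across devices: resume from the device after the
+	 * one used last, then scan its command queues for one with at
+	 * least slot_req free descriptor slots.
+	 */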
+	dev = TAILQ_NEXT(priv->last_dev, next);
+	if (unlikely(dev == NULL))
+		dev = TAILQ_FIRST(&ccp_list);
+	priv->last_dev = dev;
+	if (dev->qidx >= dev->cmd_q_count)
+		dev->qidx = 0;
+	ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+	if (ret >= slot_req)
+		return &dev->cmd_q[dev->qidx];
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->qidx++;
+		if (dev->qidx >= dev->cmd_q_count)
+			dev->qidx = 0;
+		ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+		if (ret >= slot_req)
+			return &dev->cmd_q[dev->qidx];
+	}
+	return NULL;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index d88f70c..abc788c 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -419,4 +419,13 @@ int ccp_dev_start(struct rte_cryptodev *dev);
  */
 int ccp_probe_devices(const struct rte_pci_id *ccp_id);
 
+/**
+ * allocate a ccp command queue
+ *
+ * @param dev rte crypto device
+ * @param slot_req no. of required descriptor slots
+ * @return allotted CCP queue on success otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
 #endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index ce64dbc..fd7d2d3 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -12,6 +12,7 @@
 #include <rte_dev.h>
 #include <rte_malloc.h>
 
+#include "ccp_crypto.h"
 #include "ccp_dev.h"
 #include "ccp_pmd_private.h"
 
@@ -21,23 +22,72 @@
 static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
+{
+	struct ccp_session *sess = NULL;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (unlikely(op->sym->session == NULL))
+			return NULL;
+
+		sess = (struct ccp_session *)
+			get_session_private_data(
+				op->sym->session,
+				ccp_cryptodev_driver_id);
+	}
+
+	return sess;
+}
+
 static uint16_t
-ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
-		      struct rte_crypto_op **ops __rte_unused,
-		      uint16_t nb_ops __rte_unused)
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		      uint16_t nb_ops)
 {
-	uint16_t enq_cnt = 0;
+	struct ccp_session *sess = NULL;
+	struct ccp_qp *qp = queue_pair;
+	struct ccp_queue *cmd_q;
+	struct rte_cryptodev *dev = qp->dev;
+	uint16_t i, enq_cnt = 0, slots_req = 0;
+
+	if (nb_ops == 0)
+		return 0;
+
+	if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+		return 0;
+
+	for (i = 0; i < nb_ops; i++) {
+		sess = get_ccp_session(qp, ops[i]);
+		if (unlikely(sess == NULL) && (i == 0)) {
+			qp->qp_stats.enqueue_err_count++;
+			return 0;
+		} else if (sess == NULL) {
+			nb_ops = i;
+			break;
+		}
+		slots_req += ccp_compute_slot_count(sess);
+	}
+
+	cmd_q = ccp_allot_queue(dev, slots_req);
+	if (unlikely(cmd_q == NULL))
+		return 0;
 
+	enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+	qp->qp_stats.enqueued_count += enq_cnt;
 	return enq_cnt;
 }
 
 static uint16_t
-ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
-		      struct rte_crypto_op **ops __rte_unused,
-		      uint16_t nb_ops __rte_unused)
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
 {
+	struct ccp_qp *qp = queue_pair;
 	uint16_t nb_dequeued = 0;
 
+	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
 	return nb_dequeued;
 }
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
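
For illustration, a minimal sketch (not part of the patch) of how an
application drives the burst API wired up above. dev_id, qp_id and the
prepared ops[] table are assumed to be set up elsewhere; the loop shape
is the generic cryptodev polling model, nothing CCP-specific.

    #include <rte_cryptodev.h>

    #define BURST_SZ 32

    static void
    poll_ccp_qp(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops)
    {
            uint16_t enq, deq = 0;

            /* lands in ccp_pmd_enqueue_burst(); returns 0 when no CCP
             * command queue has enough free descriptor slots */
            enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, BURST_SZ);

            /* lands in ccp_pmd_dequeue_burst(); drain what was accepted */
            while (deq < enq)
                    deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
                                                       ops + deq, enq - deq);
    }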

* [PATCH v5 07/19] crypto/ccp: support sessionless operations
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (4 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 06/19] crypto/ccp: support crypto enqueue and dequeue burst api Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 08/19] crypto/ccp: support stats related crypto pmd ops Ravi Kumar
                           ` (13 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/rte_ccp_pmd.c | 33 +++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index fd7d2d3..b3b2651 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -23,7 +23,7 @@ static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 
 static struct ccp_session *
-get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
 {
 	struct ccp_session *sess = NULL;
 
@@ -35,6 +35,27 @@ get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
 			get_session_private_data(
 				op->sym->session,
 				ccp_cryptodev_driver_id);
+	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		void *_sess;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, &_sess))
+			return NULL;
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+		sess = (struct ccp_session *)_sess_private_data;
+
+		if (unlikely(ccp_set_session_parameters(sess,
+							op->sym->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
+			return NULL;
+		}
+		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_session_private_data(op->sym->session,
+					 ccp_cryptodev_driver_id,
+					 _sess_private_data);
 	}
 
 	return sess;
@@ -82,10 +103,18 @@ ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
 {
 	struct ccp_qp *qp = queue_pair;
-	uint16_t nb_dequeued = 0;
+	uint16_t nb_dequeued = 0, i;
 
 	nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
 
+	/* Free session if a session-less crypto op */
+	for (i = 0; i < nb_dequeued; i++)
+		if (unlikely(ops[i]->sess_type ==
+			     RTE_CRYPTO_OP_SESSIONLESS)) {
+			rte_mempool_put(qp->sess_mp,
+					ops[i]->sym->session);
+			ops[i]->sym->session = NULL;
+		}
 	qp->qp_stats.dequeued_count += nb_dequeued;
 
 	return nb_dequeued;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
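
As a usage illustration (not part of the patch): a sessionless op is
built by attaching the xform chain directly to the op, so the
get_ccp_session() path above pulls a session object from qp->sess_mp
and the dequeue path frees it again. op_pool is a hypothetical mempool
from rte_crypto_op_pool_create(); key/IV fields are left to the caller.

    #include <rte_crypto.h>
    #include <rte_cryptodev.h>

    static struct rte_crypto_op *
    build_sessionless_cipher_op(struct rte_mempool *op_pool)
    {
            struct rte_crypto_op *op;
            struct rte_crypto_sym_xform *xform;

            op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
            if (op == NULL)
                    return NULL;

            /* freshly reset ops default to RTE_CRYPTO_OP_SESSIONLESS */
            xform = rte_crypto_op_sym_xforms_alloc(op, 1);
            if (xform == NULL) {
                    rte_crypto_op_free(op);
                    return NULL;
            }
            xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
            xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
            xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
            return op;
    }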

* [PATCH v5 08/19] crypto/ccp: support stats related crypto pmd ops
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (5 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 07/19] crypto/ccp: support sessionless operations Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 09/19] crypto/ccp: support ccp hwrng feature Ravi Kumar
                           ` (12 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_pmd_ops.c | 34 ++++++++++++++++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index bd0aea4..d3708f4 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -42,6 +42,36 @@ ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
 }
 
 static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+		  struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+static void
 ccp_pmd_info_get(struct rte_cryptodev *dev,
 		 struct rte_cryptodev_info *dev_info)
 {
@@ -255,8 +285,8 @@ struct rte_cryptodev_ops ccp_ops = {
 		.dev_stop		= ccp_pmd_stop,
 		.dev_close		= ccp_pmd_close,
 
-		.stats_get		= NULL,
-		.stats_reset		= NULL,
+		.stats_get		= ccp_pmd_stats_get,
+		.stats_reset		= ccp_pmd_stats_reset,
 
 		.dev_infos_get		= ccp_pmd_info_get,
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
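
A short usage sketch: with the two callbacks above wired in, the
standard cryptodev stats API aggregates the per-queue-pair counters.
dev_id is assumed to be a configured CCP device.

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_cryptodev.h>

    static void
    dump_ccp_stats(uint8_t dev_id)
    {
            struct rte_cryptodev_stats stats;

            /* dispatches to ccp_pmd_stats_get(), summing all queue pairs */
            if (rte_cryptodev_stats_get(dev_id, &stats) != 0)
                    return;
            printf("enq %" PRIu64 " deq %" PRIu64
                   " enq_err %" PRIu64 " deq_err %" PRIu64 "\n",
                   stats.enqueued_count, stats.dequeued_count,
                   stats.enqueue_err_count, stats.dequeue_err_count);

            /* dispatches to ccp_pmd_stats_reset() */
            rte_cryptodev_stats_reset(dev_id);
    }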

* [PATCH v5 09/19] crypto/ccp: support ccp hwrng feature
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (6 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 08/19] crypto/ccp: support stats related crypto pmd ops Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 10/19] crypto/ccp: support aes cipher algo Ravi Kumar
                           ` (11 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_dev.c | 20 ++++++++++++++++++++
 drivers/crypto/ccp/ccp_dev.h | 11 +++++++++++
 2 files changed, 31 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 2e702de..55cfcdd 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -62,6 +62,26 @@ ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
 	return NULL;
 }
 
+int
+ccp_read_hwrng(uint32_t *value)
+{
+	struct ccp_device *dev;
+
+	TAILQ_FOREACH(dev, &ccp_list, next) {
+		void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+		while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+			*value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+			if (*value) {
+				dev->hwrng_retries = 0;
+				return 0;
+			}
+		}
+		dev->hwrng_retries = 0;
+	}
+	return -1;
+}
+
 static const struct rte_memzone *
 ccp_queue_dma_zone_reserve(const char *queue_name,
 			   uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index abc788c..de375ee 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -21,6 +21,7 @@
 
 /**< CCP specific */
 #define MAX_HW_QUEUES                   5
+#define CCP_MAX_TRNG_RETRIES		10
 
 /**< CCP Register Mappings */
 #define Q_MASK_REG                      0x000
@@ -197,6 +198,8 @@ struct ccp_device {
 	/**< protection for shared lsb region allocation */
 	int qidx;
 	/**< current queue index */
+	int hwrng_retries;
+	/**< retry counter for CCP TRNG */
 } __rte_cache_aligned;
 
 /**< CCP H/W engine related */
@@ -428,4 +431,12 @@ int ccp_probe_devices(const struct rte_pci_id *ccp_id);
  */
 struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
 
+/**
+ * read hwrng value
+ *
+ * @param trng_value pointer where the read RNG value is stored
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
 #endif /* _CCP_DEV_H_ */
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
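
To make the retry semantics concrete: ccp_read_hwrng() walks every
probed device and gives each up to CCP_MAX_TRNG_RETRIES reads of
TRNG_OUT_REG before giving up, so -1 means the TRNG was temporarily
dry on all devices. A minimal consumer sketch, assuming the
driver-internal header is visible:

    #include <stdint.h>
    #include "ccp_dev.h"

    /* fill buf[] with up to n 32-bit TRNG words; returns how many were
     * obtained before every device exhausted its retries */
    static unsigned int
    fill_from_trng(uint32_t *buf, unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    if (ccp_read_hwrng(&buf[i]) < 0)
                            break;
            return i;
    }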

* [PATCH v5 10/19] crypto/ccp: support aes cipher algo
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (7 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 09/19] crypto/ccp: support ccp hwrng feature Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 11/19] crypto/ccp: support 3des " Ravi Kumar
                           ` (10 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 197 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h  |  13 +++
 drivers/crypto/ccp/ccp_dev.h     |  53 +++++++++++
 drivers/crypto/ccp/ccp_pmd_ops.c |  60 ++++++++++++
 4 files changed, 321 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 7ced435..3c46ac3 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -54,6 +54,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			     const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	size_t i;
 
 	cipher_xform = &xform->cipher;
 
@@ -73,6 +74,21 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 	sess->iv.length = cipher_xform->iv.length;
 
 	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_ECB;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo");
 		return -1;
@@ -80,10 +96,27 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 
 
 	switch (sess->cipher.engine) {
+	case CCP_ENGINE_AES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (i = 0; i < sess->cipher.key_length ; i++)
+			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+				sess->cipher.key[i];
+		break;
 	default:
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
 	}
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
 	return 0;
 }
 
@@ -209,6 +242,18 @@ ccp_cipher_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		count = 1;
+		/**< only op */
+		break;
+	case CCP_CIPHER_ALGO_AES_CTR:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
@@ -271,10 +316,146 @@ ccp_compute_slot_count(struct ccp_session *session)
 	return count;
 }
 
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_desc *desc;
+	union ccp_function function;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 0;
+	CCP_CMD_EOM(desc) = 0;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+	CCP_PT_BITWISE(&function) = pst->bit_mod;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = pst->len;
+
+	if (pst->dir) {
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = 0;
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+		if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+			CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+	} else {
+
+		CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+		CCP_CMD_SRC_HI(desc) = 0;
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+		CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+		CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+		CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+	}
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
+
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *lsb_buf;
+	struct ccp_passthru pst = {0};
+	struct ccp_desc *desc;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	uint8_t *iv;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+	function.raw = 0;
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
+		if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+			rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
+				   iv, session->iv.length);
+			pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+			CCP_AES_SIZE(&function) = 0x1F;
+		} else {
+			lsb_buf =
+			&(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+			rte_memcpy(lsb_buf +
+				   (CCP_SB_BYTES - session->iv.length),
+				   iv, session->iv.length);
+			pst.src_addr = b_info->lsb_buf_phys +
+				(b_info->lsb_buf_idx * CCP_SB_BYTES);
+			b_info->lsb_buf_idx++;
+		}
+
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+	key_addr = session->cipher.key_phys;
+
+	/* prepare desc for aes command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
-		  struct ccp_queue *cmd_q __rte_unused,
-		  struct ccp_batch_info *b_info __rte_unused)
+		  struct ccp_queue *cmd_q,
+		  struct ccp_batch_info *b_info)
 {
 	int result = 0;
 	struct ccp_session *session;
@@ -284,6 +465,18 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 					 ccp_cryptodev_driver_id);
 
 	switch (session->cipher.algo) {
+	case CCP_CIPHER_ALGO_AES_CBC:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	case CCP_CIPHER_ALGO_AES_CTR:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
+	case CCP_CIPHER_ALGO_AES_ECB:
+		result = ccp_perform_aes(op, cmd_q, b_info);
+		b_info->desccnt += 1;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index c2fce1d..13cdb6e 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -20,7 +20,20 @@
 
 #include "ccp_dev.h"
 
+#define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
 #define CCP_SHA3_CTX_SIZE 200
+
+/**Macro helpers for CCP command creation*/
+#define	CCP_AES_SIZE(p)		((p)->aes.size)
+#define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
+#define	CCP_AES_MODE(p)		((p)->aes.mode)
+#define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
+#define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index de375ee..de3e4bc 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -22,6 +22,7 @@
 /**< CCP specific */
 #define MAX_HW_QUEUES                   5
 #define CCP_MAX_TRNG_RETRIES		10
+#define CCP_ALIGN(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
 
 /**< CCP Register Mappings */
 #define Q_MASK_REG                      0x000
@@ -78,10 +79,52 @@
 #define LSB_SIZE                        16
 #define LSB_ITEM_SIZE                   32
 #define SLSB_MAP_SIZE                   (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR)      (LSB_ADDR / LSB_ITEM_SIZE)
 
 /* General CCP Defines */
 
 #define CCP_SB_BYTES                    32
+/* Word 0 */
+#define CCP_CMD_DW0(p)		((p)->dw0)
+#define CCP_CMD_SOC(p)		(CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p)		(CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p)	        (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p)		(CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p)	(CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p)	(CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p)	        (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p)		((p)->length)
+#define CCP_CMD_LEN(p)		(CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p)		((p)->src_lo)
+#define CCP_CMD_SRC_LO(p)	(CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p)		((p)->dw3)
+#define CCP_CMD_SRC_MEM(p)	((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p)	((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p)	((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p)	((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p)		((p)->dw4)
+#define CCP_CMD_DST_LO(p)	(CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p)		((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p)	(CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p)	((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p)	((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p)	((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p)	((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p)		((p)->key_lo)
+#define CCP_CMD_KEY_LO(p)	(CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p)		((p)->dw7)
+#define CCP_CMD_KEY_HI(p)	((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p)	((p)->dw7.key_mem)
 
 /* bitmap */
 enum {
@@ -386,6 +429,16 @@ struct ccp_desc {
 };
 
 /**
+ * ccp memory type
+ */
+enum ccp_memtype {
+	CCP_MEMTYPE_SYSTEM = 0,
+	CCP_MEMTYPE_SB,
+	CCP_MEMTYPE_LOCAL,
+	CCP_MEMTYPE_LAST,
+};
+
+/**
  * cmd id to follow order
  */
 enum ccp_cmd_order {
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index d3708f4..a6e777a 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -13,6 +13,66 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{       /* AES ECB */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_ECB,
+				.block_size = 16,
+				.key_size = {
+				   .min = 16,
+				   .max = 32,
+				   .increment = 8
+				},
+				.iv_size = {
+				   .min = 0,
+				   .max = 0,
+				   .increment = 0
+				}
+			}, }
+		}, }
+	},
+	{       /* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
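
One detail worth calling out in the session setup above: the CCP
engine expects the AES key with its bytes reversed, which is what the
key_ccp[key_length - i - 1] = key[i] loop produces before key_phys is
taken. A standalone sketch of that transform:

    #include <stddef.h>
    #include <stdint.h>

    /* mirror of the key byte-reversal in ccp_configure_session_cipher():
     * for a 16-byte key k0..k15 this stores k15..k0 */
    static void
    ccp_reverse_aes_key(uint8_t *key_ccp, const uint8_t *key, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    key_ccp[len - i - 1] = key[i];
    }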

* [PATCH v5 11/19] crypto/ccp: support 3des cipher algo
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (8 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 10/19] crypto/ccp: support aes cipher algo Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 12/19] crypto/ccp: support aes-cmac auth algo Ravi Kumar
                           ` (9 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 132 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h  |   3 +
 drivers/crypto/ccp/ccp_pmd_ops.c |  20 ++++++
 3 files changed, 154 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 3c46ac3..a946f42 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -54,7 +54,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			     const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_cipher_xform *cipher_xform = NULL;
-	size_t i;
+	size_t i, j, x;
 
 	cipher_xform = &xform->cipher;
 
@@ -89,6 +89,11 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 		sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
 		sess->cipher.engine = CCP_ENGINE_AES;
 		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+		sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
+		sess->cipher.engine = CCP_ENGINE_3DES;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo");
 		return -1;
@@ -111,6 +116,20 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
 				sess->cipher.key[i];
 		break;
+	case CCP_ENGINE_3DES:
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+		else {
+			CCP_LOG_ERR("Invalid cipher key length");
+			return -1;
+		}
+		for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+			for (i = 0; i < 8; i++)
+				sess->cipher.key_ccp[(8 + x) - i - 1] =
+					sess->cipher.key[i + x];
+		break;
 	default:
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
@@ -254,6 +273,10 @@ ccp_cipher_slot(struct ccp_session *session)
 		count = 2;
 		/**< op + passthrough for iv */
 		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		count = 2;
+		/**< op + passthrough for iv */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
@@ -452,6 +475,109 @@ ccp_perform_aes(struct rte_crypto_op *op,
 	return 0;
 }
 
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	unsigned char *lsb_buf;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *iv;
+	phys_addr_t src_addr, dest_addr, key_addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	switch (session->cipher.um.des_mode) {
+	case CCP_DES_MODE_CBC:
+		lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+		b_info->lsb_buf_idx++;
+
+		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+			   iv, session->iv.length);
+
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+		break;
+	case CCP_DES_MODE_CFB:
+	case CCP_DES_MODE_ECB:
+		CCP_LOG_ERR("Unsupported DES cipher mode");
+		return -ENOTSUP;
+	}
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->cipher.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr =
+			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						   op->sym->cipher.data.offset);
+	else
+		dest_addr = src_addr;
+
+	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for des command */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+	CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	if (session->cipher.um.des_mode)
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	/* Write the new tail address back to the queue register */
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	/* Turn the queue back on using our cached control register */
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
 		  struct ccp_queue *cmd_q,
@@ -477,6 +603,10 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 		result = ccp_perform_aes(op, cmd_q, b_info);
 		b_info->desccnt += 1;
 		break;
+	case CCP_CIPHER_ALGO_3DES_CBC:
+		result = ccp_perform_3des(op, cmd_q, b_info);
+		b_info->desccnt += 2;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported cipher algo %d",
 			    session->cipher.algo);
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 13cdb6e..3585c36 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -31,6 +31,9 @@
 #define	CCP_AES_ENCRYPT(p)	((p)->aes.encrypt)
 #define	CCP_AES_MODE(p)		((p)->aes.mode)
 #define	CCP_AES_TYPE(p)		((p)->aes.type)
+#define	CCP_DES_ENCRYPT(p)	((p)->des.encrypt)
+#define	CCP_DES_MODE(p)		((p)->des.mode)
+#define	CCP_DES_TYPE(p)		((p)->des.type)
 #define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
 #define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
 
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index a6e777a..a3b0d5c 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -73,6 +73,26 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 24,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
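
Note the key layout differs from AES: 3DES reverses each 8-byte DES
subkey in place rather than the whole key, which is what the nested
j/x/i loop above computes. An equivalent sketch with the index math
unfolded:

    #include <stddef.h>
    #include <stdint.h>

    /* mirror of key_ccp[(8 + x) - i - 1] = key[i + x] with x = 8 * j */
    static void
    ccp_reverse_des_key(uint8_t *key_ccp, const uint8_t *key, size_t len)
    {
            size_t i, j;

            for (j = 0; j < len / 8; j++)
                    for (i = 0; i < 8; i++)
                            key_ccp[8 * j + 7 - i] = key[8 * j + i];
    }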

* [PATCH v5 12/19] crypto/ccp: support aes-cmac auth algo
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (9 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 11/19] crypto/ccp: support 3des " Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 13/19] crypto/ccp: support aes-gcm aead algo Ravi Kumar
                           ` (8 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 277 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_pmd_ops.c |  20 +++
 2 files changed, 295 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index a946f42..6d8a0d6 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -10,6 +10,8 @@
 #include <sys/queue.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <openssl/cmac.h> /*sub key apis*/
+#include <openssl/evp.h> /*sub key apis*/
 
 #include <rte_hexdump.h>
 #include <rte_memzone.h>
@@ -48,6 +50,84 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 	return res;
 }
 
+/* prepare temporary keys K1 and K2 */
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+	int i;
+	/* Shift block to left, including carry */
+	for (i = 0; i < bl; i++) {
+		k[i] = l[i] << 1;
+		if (i < bl - 1 && l[i + 1] & 0x80)
+			k[i] |= 1;
+	}
+	/* If MSB set fixup with R */
+	if (l[0] & 0x80)
+		k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/* subkeys K1 and K2 generation for CMAC */
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+	const EVP_CIPHER *algo;
+	EVP_CIPHER_CTX *ctx;
+	unsigned char *ccp_ctx;
+	size_t i;
+	int dstlen, totlen;
+	unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+	unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+	unsigned char k1[AES_BLOCK_SIZE] = {0};
+	unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+	if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+		algo =  EVP_aes_128_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+		algo =  EVP_aes_192_cbc();
+	else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+		algo =  EVP_aes_256_cbc();
+	else {
+		CCP_LOG_ERR("Invalid CMAC type length");
+		return -1;
+	}
+
+	ctx = EVP_CIPHER_CTX_new();
+	if (!ctx) {
+		CCP_LOG_ERR("ctx creation failed");
+		return -1;
+	}
+	if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+			    (unsigned char *)zero_iv) <= 0)
+		goto key_generate_err;
+	if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+			      AES_BLOCK_SIZE) <= 0)
+		goto key_generate_err;
+	if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+		goto key_generate_err;
+
+	memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+	prepare_key(k1, dst, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k1[i];
+
+	ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+				   (2 * CCP_SB_BYTES) - 1);
+	prepare_key(k2, k1, AES_BLOCK_SIZE);
+	for (i = 0; i < AES_BLOCK_SIZE;  i++, ccp_ctx--)
+		*ccp_ctx = k2[i];
+
+	EVP_CIPHER_CTX_free(ctx);
+
+	return 0;
+
+key_generate_err:
+	CCP_LOG_ERR("CMAC Init failed");
+	EVP_CIPHER_CTX_free(ctx);
+	return -1;
+}
+
 /* configure session */
 static int
 ccp_configure_session_cipher(struct ccp_session *sess,
@@ -144,6 +224,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_auth_xform *auth_xform = NULL;
+	size_t i;
 
 	auth_xform = &xform->auth;
 
@@ -153,6 +234,33 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+		sess->auth.key_length = auth_xform->key.length;
+		/**< padding and hash result */
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = AES_BLOCK_SIZE;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		if (sess->auth.key_length == 16)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->auth.key_length == 24)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->auth.key_length == 32)
+			sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid CMAC key length");
+			return -1;
+		}
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   sess->auth.key_length);
+		for (i = 0; i < sess->auth.key_length; i++)
+			sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+				sess->auth.key[i];
+		if (generate_cmac_subkeys(sess))
+			return -1;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported hash algo");
 		return -ENOTSUP;
@@ -290,6 +398,15 @@ ccp_auth_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_CMAC:
+		count = 4;
+		/**
+		 * op
+		 * extra descriptor in padding case
+		 * (k1/k2(255:128) with iv(127:0))
+		 * Retrieve result
+		 */
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported auth algo %d",
 			    session->auth.algo);
@@ -389,6 +506,158 @@ ccp_perform_passthru(struct ccp_passthru *pst,
 }
 
 static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+		     struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint8_t *src_tb, *append_ptr, *ctx_addr;
+	phys_addr_t src_addr, dest_addr, key_addr;
+	int length, non_align_len;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+	CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+	CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
+	if (op->sym->auth.data.length % session->auth.block_size == 0) {
+
+		ctx_addr = session->auth.pre_compute;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		/* prepare desc for aes-cmac command */
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	} else {
+		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+		memset(ctx_addr, 0, AES_BLOCK_SIZE);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 1;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+		length *= AES_BLOCK_SIZE;
+		non_align_len = op->sym->auth.data.length - length;
+		/* prepare desc for aes-cmac command */
+		/*Command 1*/
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_INIT(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+
+		CCP_CMD_LEN(desc) = length;
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+		CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		/*Command 2*/
+		append_ptr = append_ptr + CCP_SB_BYTES;
+		memset(append_ptr, 0, AES_BLOCK_SIZE);
+		src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+						 uint8_t *,
+						 op->sym->auth.data.offset +
+						 length);
+		rte_memcpy(append_ptr, src_tb, non_align_len);
+		append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+		desc = &cmd_q->qbase_desc[cmd_q->qidx];
+		memset(desc, 0, Q_DESC_SIZE);
+
+		CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+		CCP_CMD_EOM(desc) = 1;
+		CCP_CMD_FUNCTION(desc) = function.raw;
+		CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+		CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+		CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+		CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+		CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+		CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+		CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+		CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+		cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		rte_wmb();
+		tail =
+		(uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol | CMD_Q_RUN);
+	}
+	/* Retrieve result */
+	pst.dest_addr = dest_addr;
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
 ccp_perform_aes(struct rte_crypto_op *op,
 		struct ccp_queue *cmd_q,
 		struct ccp_batch_info *b_info)
@@ -617,8 +886,8 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
 
 static inline int
 ccp_crypto_auth(struct rte_crypto_op *op,
-		struct ccp_queue *cmd_q __rte_unused,
-		struct ccp_batch_info *b_info __rte_unused)
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
 {
 
 	int result = 0;
@@ -629,6 +898,10 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 					ccp_cryptodev_driver_id);
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_CMAC:
+		result = ccp_perform_aes_cmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported auth algo %d",
 			    session->auth.algo);
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index a3b0d5c..ab526f3 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -13,6 +13,26 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{	/*AES-CMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+			}, }
+		}, }
+	},
 	{       /* AES ECB */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
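
The patch derives the CMAC subkeys K1/K2 itself (prepare_key() over
E(K, 0^128)) and borrows OpenSSL's EVP layer only for that block
encryption. For validation, the driver's digest can be cross-checked
against OpenSSL's own CMAC; a sketch using the OpenSSL 1.1 CMAC API,
shown here for a 128-bit key:

    #include <stddef.h>
    #include <stdint.h>
    #include <openssl/cmac.h>
    #include <openssl/evp.h>

    static int
    cmac_reference(const uint8_t key[16], const uint8_t *msg, size_t mlen,
                   uint8_t mac[16])
    {
            CMAC_CTX *ctx = CMAC_CTX_new();
            size_t maclen = 0;
            int ret = -1;

            if (ctx == NULL)
                    return -1;
            if (CMAC_Init(ctx, key, 16, EVP_aes_128_cbc(), NULL) &&
                CMAC_Update(ctx, msg, mlen) &&
                CMAC_Final(ctx, mac, &maclen))
                    ret = 0;
            CMAC_CTX_free(ctx);
            return ret;
    }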

* [PATCH v5 13/19] crypto/ccp: support aes-gcm aead algo
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (10 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 12/19] crypto/ccp: support aes-cmac auth algo Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 14/19] crypto/ccp: support sha1 authentication algo Ravi Kumar
                           ` (7 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 235 ++++++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_pmd_ops.c |  30 +++++
 2 files changed, 261 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 6d8a0d6..3a14b77 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -273,6 +273,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
 			   const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_aead_xform *aead_xform = NULL;
+	size_t i;
 
 	aead_xform = &xform->aead;
 
@@ -287,6 +288,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
 		sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	}
+	sess->aead_algo = aead_xform->algo;
 	sess->auth.aad_length = aead_xform->aad_length;
 	sess->auth.digest_length = aead_xform->digest_length;
 
@@ -295,10 +297,37 @@ ccp_configure_session_aead(struct ccp_session *sess,
 	sess->iv.length = aead_xform->iv.length;
 
 	switch (aead_xform->algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+		sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+		sess->cipher.engine = CCP_ENGINE_AES;
+		if (sess->cipher.key_length == 16)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+		else if (sess->cipher.key_length == 24)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+		else if (sess->cipher.key_length == 32)
+			sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+		else {
+			CCP_LOG_ERR("Invalid aead key length");
+			return -1;
+		}
+		for (i = 0; i < sess->cipher.key_length; i++)
+			sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+				sess->cipher.key[i];
+		sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+		sess->auth.engine = CCP_ENGINE_AES;
+		sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = 0;
+		sess->auth.block_size = AES_BLOCK_SIZE;
+		sess->cmd_id = CCP_CMD_COMBINED;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo");
 		return -ENOTSUP;
 	}
+	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
 	return 0;
 }
 
@@ -421,10 +450,27 @@ ccp_aead_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->aead_algo) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo %d",
 			    session->aead_algo);
 	}
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		count = 5;
+		/**
+		 * 1. Passthru iv
+		 * 2. Hash AAD
+		 * 3. GCTR
+		 * 4. Reload passthru
+		 * 5. Hash Final tag
+		 */
+		break;
+	default:
+		CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+			    session->auth.algo);
+	}
 	return count;
 }
 
@@ -847,6 +893,179 @@ ccp_perform_3des(struct rte_crypto_op *op,
 	return 0;
 }
 
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	uint8_t *iv;
+	struct ccp_passthru pst;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	uint64_t *temp;
+	phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+	phys_addr_t digest_dest_addr;
+	int length, non_align_len;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+	key_addr = session->cipher.key_phys;
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->aead.data.offset);
+	if (unlikely(op->sym->m_dst != NULL))
+		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+						op->sym->aead.data.offset);
+	else
+		dest_addr = src_addr;
+	rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
+	digest_dest_addr = op->sym->aead.digest.phys_addr;
+	temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
+	*temp++ = rte_bswap64(session->auth.aad_length << 3);
+	*temp = rte_bswap64(op->sym->aead.data.length << 3);
+
+	non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
+	length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
+
+	aad_addr = op->sym->aead.aad.phys_addr;
+
+	/* CMD1 IV Passthru */
+	rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
+		   session->iv.length);
+	pst.src_addr = session->cipher.nonce_phys;
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = CCP_SB_BYTES;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD2 GHASH-AAD */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD3 : GCTR Plain text */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+	if (non_align_len == 0)
+		CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+	else
+		CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* CMD4 : PT to copy IV */
+	pst.src_addr = session->cipher.nonce_phys;
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+	pst.len = AES_BLOCK_SIZE;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/* CMD5 : GHASH-Final */
+	function.raw = 0;
+	CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+	CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+	CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	/* Last block (AAD_len || PT_len)*/
+	CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
+	CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+	CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
 static inline int
 ccp_crypto_cipher(struct rte_crypto_op *op,
 		  struct ccp_queue *cmd_q,
@@ -913,17 +1132,25 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 
 static inline int
 ccp_crypto_aead(struct rte_crypto_op *op,
-		struct ccp_queue *cmd_q __rte_unused,
-		struct ccp_batch_info *b_info __rte_unused)
+		struct ccp_queue *cmd_q,
+		struct ccp_batch_info *b_info)
 {
 	int result = 0;
 	struct ccp_session *session;
 
 	session = (struct ccp_session *)get_session_private_data(
-					 op->sym->session,
+					op->sym->session,
 					ccp_cryptodev_driver_id);
 
-	switch (session->aead_algo) {
+	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_AES_GCM:
+		if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+			CCP_LOG_ERR("Incorrect chain order");
+			return -1;
+		}
+		result = ccp_perform_aes_gcm(op, cmd_q);
+		b_info->desccnt += 5;
+		break;
 	default:
 		CCP_LOG_ERR("Unsupported aead algo %d",
 			    session->aead_algo);
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index ab526f3..1314c17 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -113,6 +113,36 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{       /* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				 .algo = RTE_CRYPTO_AEAD_AES_GCM,
+				 .block_size = 16,
+				 .key_size = {
+					 .min = 16,
+					 .max = 32,
+					 .increment = 8
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = {
+					 .min = 0,
+					 .max = 65535,
+					 .increment = 1
+				 },
+				 .iv_size = {
+					 .min = 12,
+					 .max = 16,
+					 .increment = 4
+				 },
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
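
The five-descriptor GCM chain ends with a GHASH over the 16-byte
length block len(A) || len(C), which the code above assembles just
past the digest through the temp pointer. A sketch of that block
(64-bit big-endian bit lengths), using memcpy to sidestep alignment:

    #include <stdint.h>
    #include <string.h>
    #include <rte_byteorder.h>

    /* final GHASH block as built in ccp_perform_aes_gcm() */
    static void
    gcm_len_block(uint8_t block[16], uint64_t aad_len, uint64_t txt_len)
    {
            uint64_t be;

            be = rte_bswap64(aad_len << 3);     /* AAD length in bits */
            memcpy(block, &be, 8);
            be = rte_bswap64(txt_len << 3);     /* text length in bits */
            memcpy(block + 8, &be, 8);
    }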

* [PATCH v5 14/19] crypto/ccp: support sha1 authentication algo
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (11 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 13/19] crypto/ccp: support aes-gcm aead algo Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 15/19] crypto/ccp: support sha2 family " Ravi Kumar
                           ` (6 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 367 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  23 +++
 drivers/crypto/ccp/ccp_pmd_ops.c |  42 +++++
 3 files changed, 432 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 3a14b77..517c284 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -10,6 +10,7 @@
 #include <sys/queue.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <openssl/sha.h>
 #include <openssl/cmac.h> /*sub key apis*/
 #include <openssl/evp.h> /*sub key apis*/
 
@@ -26,6 +27,14 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA1_H4, SHA1_H3,
+	SHA1_H2, SHA1_H1,
+	SHA1_H0, 0x0U,
+	0x0U, 0x0U,
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -50,6 +59,59 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 	return res;
 }
 
+/* partial hash using openssl */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA_CTX ctx;
+
+	if (!SHA1_Init(&ctx))
+		return -EFAULT;
+	SHA1_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+	return 0;
+}
+
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+	uint8_t ipad[sess->auth.block_size];
+	uint8_t	opad[sess->auth.block_size];
+	uint8_t *ipad_t, *opad_t;
+	uint32_t *hash_value_be32, hash_temp32[8];
+	int i, count;
+
+	opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+	hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+
+	/* considering key size is always equal to block size of algorithm */
+	for (i = 0; i < sess->auth.block_size; i++) {
+		ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+		opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+	}
+
+	switch (sess->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		count = SHA1_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	default:
+		CCP_LOG_ERR("Invalid auth algo");
+		return -1;
+	}
+}
+
 /* prepare temporary keys K1 and K2 */
 static void prepare_key(unsigned char *k, unsigned char *l, int bl)
 {
@@ -234,6 +296,31 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1:
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx = (void *)ccp_sha1_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -427,6 +514,13 @@ ccp_auth_slot(struct ccp_session *session)
 	int count = 0;
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+		count = 3;
+		/**< op + LSB passthrough copy to/from */
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		count = 6;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -552,6 +646,271 @@ ccp_perform_passthru(struct ccp_passthru *pst,
 }
 
 static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, dest_addr_t;
+	struct ccp_passthru pst;
+	uint64_t auth_msg_bits;
+	void *append_ptr;
+	uint8_t *addr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					 ccp_cryptodev_driver_id);
+	addr = session->auth.pre_compute;
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	dest_addr_t = dest_addr;
+
+	/** Load PHash1 to LSB*/
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/**sha engine command descriptor for IntermediateHash*/
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+	auth_msg_bits = (op->sym->auth.data.length +
+			 session->auth.block_size)  * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Intermediate Hash value retrieve */
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = session->auth.ctx_len;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	}
+
+	/** Load PHash2 to LSB*/
+	addr += session->auth.ctx_len;
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/**sha engine command descriptor for FinalHash*/
+	dest_addr_t += session->auth.offset;
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+			     session->auth.offset);
+	auth_msg_bits = (session->auth.block_size +
+			 session->auth.ctx_len -
+			 session->auth.offset) * 8;
+
+	CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Retrieve hmac output */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+
+}
+
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+		struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr;
+	struct ccp_passthru pst;
+	void *append_ptr;
+	uint64_t auth_msg_bits;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+
+	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+
+	/** Passthru sha context*/
+
+	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+						     session->auth.ctx);
+	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.len = session->auth.ctx_len;
+	pst.dir = 1;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	/**prepare sha command descriptor*/
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+	CCP_CMD_SOC(desc) = 0;
+	CCP_CMD_IOC(desc) = 0;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+	CCP_CMD_PROT(desc) = 0;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+	auth_msg_bits = op->sym->auth.data.length * 8;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+	CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+	CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Hash value retrieve */
+	pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+	pst.dest_addr = dest_addr;
+	pst.len = session->auth.ctx_len;
+	pst.dir = 0;
+	pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+	if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+	else
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+	ccp_perform_passthru(&pst, cmd_q);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+
+}
+
+static int
 ccp_perform_aes_cmac(struct rte_crypto_op *op,
 		     struct ccp_queue *cmd_q)
 {
@@ -1117,6 +1476,14 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 					ccp_cryptodev_driver_id);
 
 	switch (session->auth.algo) {
+	case CCP_AUTH_ALGO_SHA1:
+		result = ccp_perform_sha(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 6;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 3585c36..aa4787a 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -34,9 +34,32 @@
 #define	CCP_DES_ENCRYPT(p)	((p)->des.encrypt)
 #define	CCP_DES_MODE(p)		((p)->des.mode)
 #define	CCP_DES_TYPE(p)		((p)->des.type)
+#define	CCP_SHA_TYPE(p)		((p)->sha.type)
 #define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
 #define	CCP_PT_BITWISE(p)	((p)->pt.bitwise)
 
+/* HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#define MD5_DIGEST_SIZE         16
+#define MD5_BLOCK_SIZE          64
+#endif
+
+/* SHA */
+#define SHA_COMMON_DIGEST_SIZE	32
+#define SHA1_DIGEST_SIZE        20
+#define SHA1_BLOCK_SIZE         64
+
+/* SHA LSB initialization values */
+
+#define SHA1_H0		0x67452301UL
+#define SHA1_H1		0xefcdab89UL
+#define SHA1_H2		0x98badcfeUL
+#define SHA1_H3		0x10325476UL
+#define SHA1_H4		0xc3d2e1f0UL
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 1314c17..13f6820 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -13,6 +13,48 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+	{	/* SHA1 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 20,
+					 .max = 20,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/*AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
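
The session-build path above reduces HMAC key processing to two single-block
compressions: generate_partial_hash() computes PHash1 = H(k ^ ipad) and
PHash2 = H(k ^ opad) and stores the resulting state words in reversed order,
the layout the CCP LSB expects. A standalone sketch of that precompute under
the same assumptions as the patch (key already zero-padded to the 64-byte
SHA1 block; the memcpy from SHA_CTX relies on OpenSSL laying out h0..h4
first, exactly as the patch's rte_memcpy does); the function name is
illustrative:

#include <string.h>
#include <stdint.h>
#include <openssl/sha.h>

static int
hmac_sha1_precompute(const uint8_t key[64],
		     uint32_t phash1[5], uint32_t phash2[5])
{
	uint8_t ipad[64], opad[64];
	uint32_t tmp[5];
	SHA_CTX ctx;
	int i;

	for (i = 0; i < 64; i++) {
		ipad[i] = key[i] ^ 0x36;	/* HMAC_IPAD_VALUE */
		opad[i] = key[i] ^ 0x5c;	/* HMAC_OPAD_VALUE */
	}

	if (!SHA1_Init(&ctx))
		return -1;
	SHA1_Transform(&ctx, ipad);		/* PHash1 = H(k ^ ipad) */
	memcpy(tmp, &ctx, sizeof(tmp));		/* first 20 bytes: h0..h4 */
	for (i = 0; i < 5; i++)			/* reverse words for the LSB */
		phash1[i] = tmp[4 - i];

	if (!SHA1_Init(&ctx))
		return -1;
	SHA1_Transform(&ctx, opad);		/* PHash2 = H(k ^ opad) */
	memcpy(tmp, &ctx, sizeof(tmp));
	for (i = 0; i < 5; i++)
		phash2[i] = tmp[4 - i];
	return 0;
}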

* [PATCH v5 15/19] crypto/ccp: support sha2 family authentication algo
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (12 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 14/19] crypto/ccp: support sha1 authentication algo Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 16/19] crypto/ccp: support sha3 " Ravi Kumar
                           ` (5 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 270 +++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp_crypto.h  |  48 +++++++
 drivers/crypto/ccp/ccp_pmd_ops.c | 168 ++++++++++++++++++++++++
 3 files changed, 486 insertions(+)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 517c284..8b26ad3 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -35,6 +35,34 @@ static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	0x0U, 0x0U,
 };
 
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA224_H7, SHA224_H6,
+	SHA224_H5, SHA224_H4,
+	SHA224_H3, SHA224_H2,
+	SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+	SHA256_H7, SHA256_H6,
+	SHA256_H5, SHA256_H4,
+	SHA256_H3, SHA256_H2,
+	SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA384_H7, SHA384_H6,
+	SHA384_H5, SHA384_H4,
+	SHA384_H3, SHA384_H2,
+	SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+	SHA512_H7, SHA512_H6,
+	SHA512_H5, SHA512_H4,
+	SHA512_H3, SHA512_H2,
+	SHA512_H1, SHA512_H0,
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -71,6 +99,54 @@ static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA224_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA256_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA384_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA512_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx,
+		   SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
 static int generate_partial_hash(struct ccp_session *sess)
 {
 
@@ -78,11 +154,13 @@ static int generate_partial_hash(struct ccp_session *sess)
 	uint8_t	opad[sess->auth.block_size];
 	uint8_t *ipad_t, *opad_t;
 	uint32_t *hash_value_be32, hash_temp32[8];
+	uint64_t *hash_value_be64, hash_temp64[8];
 	int i, count;
 
 	opad_t = ipad_t = (uint8_t *)sess->auth.key;
 
 	hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+	hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
 
 	/* the key buffer is zero-padded to the algorithm's block size */
 	for (i = 0; i < sess->auth.block_size; i++) {
@@ -106,6 +184,66 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		count = SHA256_DIGEST_SIZE >> 2;
+
+		if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+
+		hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be32++)
+			*hash_value_be32 = hash_temp32[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = SHA512_DIGEST_SIZE >> 3;
+
+		if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+
+		hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+			return -1;
+		for (i = 0; i < count; i++, hash_value_be64++)
+			*hash_value_be64 = hash_temp64[count - 1 - i];
+		return 0;
 	default:
 		CCP_LOG_ERR("Invalid auth algo");
 		return -1;
@@ -321,6 +459,107 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx = (void *)ccp_sha224_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx = (void *)ccp_sha256_init;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+		sess->auth.ctx_len = CCP_SB_BYTES;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx = (void *)ccp_sha384_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx = (void *)ccp_sha512_init;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+		sess->auth.ctx_len = CCP_SB_BYTES << 1;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
+
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -515,12 +754,32 @@ ccp_auth_slot(struct ccp_session *session)
 
 	switch (session->auth.algo) {
 	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
 		count = 3;
 		/**< op + LSB passthrough copy to/from */
 		break;
 	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
 		count = 6;
 		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		count = 7;
+		/**
+		 * 1. Load PHash1 = H(k ^ ipad); to LSB
+		 * 2. generate IHash = H(hash on message with PHash1
+		 * as init values);
+		 * 3. Retrieve IHash 2 slots for 384/512
+		 * 4. Load Phash2 = H(k ^ opad); to LSB
+		 * 5. generate FHash = H(hash on Ihash with Phash2
+		 * as init value);
+		 * 6. Retrieve HMAC output from LSB to host memory
+		 */
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -1477,13 +1736,24 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 
 	switch (session->auth.algo) {
 	case CCP_AUTH_ALGO_SHA1:
+	case CCP_AUTH_ALGO_SHA224:
+	case CCP_AUTH_ALGO_SHA256:
+	case CCP_AUTH_ALGO_SHA384:
+	case CCP_AUTH_ALGO_SHA512:
 		result = ccp_perform_sha(op, cmd_q);
 		b_info->desccnt += 3;
 		break;
 	case CCP_AUTH_ALGO_SHA1_HMAC:
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+	case CCP_AUTH_ALGO_SHA256_HMAC:
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 6;
 		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		result = ccp_perform_hmac(op, cmd_q);
+		b_info->desccnt += 7;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index aa4787a..e4b6445 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -52,6 +52,18 @@
 #define SHA1_DIGEST_SIZE        20
 #define SHA1_BLOCK_SIZE         64
 
+#define SHA224_DIGEST_SIZE      28
+#define SHA224_BLOCK_SIZE       64
+
+#define SHA256_DIGEST_SIZE      32
+#define SHA256_BLOCK_SIZE       64
+
+#define SHA384_DIGEST_SIZE      48
+#define SHA384_BLOCK_SIZE       128
+
+#define SHA512_DIGEST_SIZE      64
+#define SHA512_BLOCK_SIZE       128
+
 /* SHA LSB initialization values */
 
 #define SHA1_H0		0x67452301UL
@@ -60,6 +72,42 @@
 #define SHA1_H3		0x10325476UL
 #define SHA1_H4		0xc3d2e1f0UL
 
+#define SHA224_H0	0xc1059ed8UL
+#define SHA224_H1	0x367cd507UL
+#define SHA224_H2	0x3070dd17UL
+#define SHA224_H3	0xf70e5939UL
+#define SHA224_H4	0xffc00b31UL
+#define SHA224_H5	0x68581511UL
+#define SHA224_H6	0x64f98fa7UL
+#define SHA224_H7	0xbefa4fa4UL
+
+#define SHA256_H0	0x6a09e667UL
+#define SHA256_H1	0xbb67ae85UL
+#define SHA256_H2	0x3c6ef372UL
+#define SHA256_H3	0xa54ff53aUL
+#define SHA256_H4	0x510e527fUL
+#define SHA256_H5	0x9b05688cUL
+#define SHA256_H6	0x1f83d9abUL
+#define SHA256_H7	0x5be0cd19UL
+
+#define SHA384_H0	0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1	0x629a292a367cd507ULL
+#define SHA384_H2	0x9159015a3070dd17ULL
+#define SHA384_H3	0x152fecd8f70e5939ULL
+#define SHA384_H4	0x67332667ffc00b31ULL
+#define SHA384_H5	0x8eb44a8768581511ULL
+#define SHA384_H6	0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7	0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0	0x6a09e667f3bcc908ULL
+#define SHA512_H1	0xbb67ae8584caa73bULL
+#define SHA512_H2	0x3c6ef372fe94f82bULL
+#define SHA512_H3	0xa54ff53a5f1d36f1ULL
+#define SHA512_H4	0x510e527fade682d1ULL
+#define SHA512_H5	0x9b05688c2b3e6c1fULL
+#define SHA512_H6	0x1f83d9abfb41bd6bULL
+#define SHA512_H7	0x5be0cd19137e2179ULL
+
 /**
  * CCP supported AES modes
  */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 13f6820..c5fd3ed 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -55,6 +55,174 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				 .block_size = 128,
+				 .key_size = {
+					 .min = 1,
+					 .max = 128,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/*AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread
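
The ctx_len/offset arithmetic repeated across the SHA-2 cases above follows
one rule: digests up to 32 bytes occupy a single LSB slot, SHA-384/512
occupy two, and auth.offset parks the digest at the tail of the slot(s). A
small self-check of that rule, assuming a 32-byte LSB slot size (the
CCP_SB_BYTES value the driver uses throughout):

#include <assert.h>

#define CCP_SB_BYTES 32	/* assumed LSB slot size */

struct sha_lsb_layout { int ctx_len, offset; };

static struct sha_lsb_layout
sha_lsb(int digest_size)
{
	struct sha_lsb_layout l;

	l.ctx_len = (digest_size <= CCP_SB_BYTES) ?
		    CCP_SB_BYTES : (CCP_SB_BYTES << 1);
	l.offset = l.ctx_len - digest_size;
	return l;
}

int main(void)
{
	/* matches ccp_configure_session_auth() in the patch above */
	assert(sha_lsb(20).offset == 12);	/* SHA-1   */
	assert(sha_lsb(28).offset == 4);	/* SHA-224 */
	assert(sha_lsb(32).offset == 0);	/* SHA-256 */
	assert(sha_lsb(48).offset == 16);	/* SHA-384 */
	assert(sha_lsb(64).offset == 0);	/* SHA-512 */
	return 0;
}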

* [PATCH v5 16/19] crypto/ccp: support sha3 family authentication algo
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (13 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 15/19] crypto/ccp: support sha2 family " Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 " Ravi Kumar
                           ` (4 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c       | 667 +++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h       |  22 ++
 drivers/crypto/ccp/ccp_pmd_ops.c      | 168 +++++++++
 lib/librte_cryptodev/rte_crypto_sym.h |  17 +
 4 files changed, 873 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 8b26ad3..53e731b 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -63,6 +63,74 @@ uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
 	SHA512_H1, SHA512_H0,
 };
 
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+	(((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+	uint64_t saved;
+	/**
+	 * The portion of the input message that we
+	 * didn't consume yet
+	 */
+	union {
+		uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+		/* Keccak's state */
+		uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+		/** total 200-byte ctx size */
+	};
+	unsigned int byteIndex;
+	/**
+	 * 0..7--the next byte after the set one
+	 * (starts from 0; 0--none are buffered)
+	 */
+	unsigned int wordIndex;
+	/**
+	 * 0..24--the next word to integrate input
+	 * (starts from 0)
+	 */
+	unsigned int capacityWords;
+	/**
+	 * the double size of the hash output in
+	 * words (e.g. 16 for Keccak 512)
+	 */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+	(((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+	SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+	SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+	SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+	SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+	SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+	SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+	SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+	SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+	SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+	SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+	SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+	1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+	18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+	10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+	14, 22, 9, 6, 1
+};
+
 static enum ccp_cmd_order
 ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
@@ -147,6 +215,223 @@ static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
+static void
+keccakf(uint64_t s[25])
+{
+	int i, j, round;
+	uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+	for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+		/* Theta */
+		for (i = 0; i < 5; i++)
+			bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+				s[i + 20];
+
+		for (i = 0; i < 5; i++) {
+			t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+			for (j = 0; j < 25; j += 5)
+				s[j + i] ^= t;
+		}
+
+		/* Rho Pi */
+		t = s[1];
+		for (i = 0; i < 24; i++) {
+			j = keccakf_piln[i];
+			bc[0] = s[j];
+			s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+			t = bc[0];
+		}
+
+		/* Chi */
+		for (j = 0; j < 25; j += 5) {
+			for (i = 0; i < 5; i++)
+				bc[i] = s[j + i];
+			for (i = 0; i < 5; i++)
+				s[j + i] ^= (~bc[(i + 1) % 5]) &
+					    bc[(i + 2) % 5];
+		}
+
+		/* Iota */
+		s[0] ^= keccakf_rndc[round];
+	}
+}
+
+static void
+sha3_Init224(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+
+/* Absorb input into the Keccak state: buffer stray bytes until a full
+ * uint64_t word accumulates, XOR complete words into the state, and
+ * run keccakf() whenever the sponge rate is filled.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+	sha3_context *ctx = (sha3_context *) priv;
+	unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+	size_t words;
+	unsigned int tail;
+	size_t i;
+	const uint8_t *buf = bufIn;
+
+	if (len < old_tail) {
+		while (len--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+		return;
+	}
+
+	if (old_tail) {
+		len -= old_tail;
+		while (old_tail--)
+			ctx->saved |= (uint64_t) (*(buf++)) <<
+				      ((ctx->byteIndex++) * 8);
+
+		ctx->s[ctx->wordIndex] ^= ctx->saved;
+		ctx->byteIndex = 0;
+		ctx->saved = 0;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	words = len / sizeof(uint64_t);
+	tail = len - words * sizeof(uint64_t);
+
+	for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+		const uint64_t t = (uint64_t) (buf[0]) |
+			((uint64_t) (buf[1]) << 8 * 1) |
+			((uint64_t) (buf[2]) << 8 * 2) |
+			((uint64_t) (buf[3]) << 8 * 3) |
+			((uint64_t) (buf[4]) << 8 * 4) |
+			((uint64_t) (buf[5]) << 8 * 5) |
+			((uint64_t) (buf[6]) << 8 * 6) |
+			((uint64_t) (buf[7]) << 8 * 7);
+		ctx->s[ctx->wordIndex] ^= t;
+		if (++ctx->wordIndex ==
+		   (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+			keccakf(ctx->s);
+			ctx->wordIndex = 0;
+		}
+	}
+
+	while (tail--)
+		ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
+
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init224(ctx);
+	sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init256(ctx);
+	sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init384(ctx);
+	sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+	sha3_context *ctx;
+	int i;
+
+	ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+	if (!ctx) {
+		CCP_LOG_ERR("sha3-ctx creation failed");
+		return -ENOMEM;
+	}
+	sha3_Init512(ctx);
+	sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+	for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+		*data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+	rte_free(ctx);
+
+	return 0;
+}
+
 static int generate_partial_hash(struct ccp_session *sess)
 {
 
@@ -156,6 +441,7 @@ static int generate_partial_hash(struct ccp_session *sess)
 	uint32_t *hash_value_be32, hash_temp32[8];
 	uint64_t *hash_value_be64, hash_temp64[8];
 	int i, count;
+	uint8_t *hash_value_sha3;
 
 	opad_t = ipad_t = (uint8_t *)sess->auth.key;
 
@@ -199,6 +485,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_224(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					       + sess->auth.ctx_len);
+		if (partial_hash_sha3_224(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA256_HMAC:
 		count = SHA256_DIGEST_SIZE >> 2;
 
@@ -214,6 +510,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be32++)
 			*hash_value_be32 = hash_temp32[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_256(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_256(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 		count = SHA512_DIGEST_SIZE >> 3;
 
@@ -229,6 +535,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be64++)
 			*hash_value_be64 = hash_temp64[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_384(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_384(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	case CCP_AUTH_ALGO_SHA512_HMAC:
 		count = SHA512_DIGEST_SIZE >> 3;
 
@@ -244,6 +560,16 @@ static int generate_partial_hash(struct ccp_session *sess)
 		for (i = 0; i < count; i++, hash_value_be64++)
 			*hash_value_be64 = hash_temp64[count - 1 - i];
 		return 0;
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		hash_value_sha3 = sess->auth.pre_compute;
+		if (partial_hash_sha3_512(ipad, hash_value_sha3))
+			return -1;
+
+		hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+					      + sess->auth.ctx_len);
+		if (partial_hash_sha3_512(opad, hash_value_sha3))
+			return -1;
+		return 0;
 	default:
 		CCP_LOG_ERR("Invalid auth algo");
 		return -1;
@@ -484,6 +810,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_224:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+		if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA256:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA256;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -509,6 +859,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_256:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+		if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA384:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA384;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -534,6 +908,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
+	case RTE_CRYPTO_AUTH_SHA3_384:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+		if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_SHA512:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA512;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -559,7 +957,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		if (generate_partial_hash(sess))
 			return -1;
 		break;
-
+	case RTE_CRYPTO_AUTH_SHA3_512:
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		break;
+	case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+		if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+		sess->auth.engine = CCP_ENGINE_SHA;
+		sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+		sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+		sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		if (generate_partial_hash(sess))
+			return -1;
+		break;
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
 		sess->auth.engine = CCP_ENGINE_AES;
@@ -780,6 +1201,26 @@ ccp_auth_slot(struct ccp_session *session)
 		 * 6. Retrieve HMAC output from LSB to host memory
 		 */
 		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		count = 1;
+		/**< only op ctx and dst in host memory*/
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		count = 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		count = 4;
+		/**
+		 * 1. Op to Perform Ihash
+		 * 2. Retrieve result from LSB to host memory
+		 * 3. Perform final hash
+		 */
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		count = 4;
 		/**
@@ -1170,6 +1611,213 @@ ccp_perform_sha(struct rte_crypto_op *op,
 }
 
 static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+		      struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	struct ccp_passthru pst;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+						   *)session->auth.pre_compute);
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/*desc1 for SHA3-Ihash operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+	CCP_CMD_DST_HI(desc) = 0;
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	/* Intermediate Hash value retrieve */
+	if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+	    (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
+		pst.src_addr =
+			(phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+
+	} else {
+		pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+		pst.dest_addr = dest_addr_t;
+		pst.len = CCP_SB_BYTES;
+		pst.dir = 0;
+		pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ccp_perform_passthru(&pst, cmd_q);
+	}
+
+	/**sha engine command descriptor for FinalHash*/
+	ctx_paddr += CCP_SHA3_CTX_SIZE;
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+		dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+		CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+	} else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+		dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+		CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+	} else {
+		CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+	}
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+	CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+		 struct ccp_queue *cmd_q)
+{
+	struct ccp_session *session;
+	union ccp_function function;
+	struct ccp_desc *desc;
+	uint8_t *ctx_addr, *append_ptr;
+	uint32_t tail;
+	phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+	session = (struct ccp_session *)get_session_private_data(
+					 op->sym->session,
+					ccp_cryptodev_driver_id);
+
+	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					      op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	ctx_addr = session->auth.sha3_ctx;
+	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+	desc = &cmd_q->qbase_desc[cmd_q->qidx];
+	memset(desc, 0, Q_DESC_SIZE);
+
+	/* prepare desc for SHA3 operation */
+	CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+	CCP_CMD_INIT(desc) = 1;
+	CCP_CMD_EOM(desc) = 1;
+
+	function.raw = 0;
+	CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+	CCP_CMD_FUNCTION(desc) = function.raw;
+
+	CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+	CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+	CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+	CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+	CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+	CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+	cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+	rte_wmb();
+
+	tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+		      cmd_q->qcontrol | CMD_Q_RUN);
+
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	return 0;
+}
+
+static int
 ccp_perform_aes_cmac(struct rte_crypto_op *op,
 		     struct ccp_queue *cmd_q)
 {
@@ -1754,6 +2402,23 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 7;
 		break;
+	case CCP_AUTH_ALGO_SHA3_224:
+	case CCP_AUTH_ALGO_SHA3_256:
+	case CCP_AUTH_ALGO_SHA3_384:
+	case CCP_AUTH_ALGO_SHA3_512:
+		result = ccp_perform_sha3(op, cmd_q);
+		b_info->desccnt += 1;
+		break;
+	case CCP_AUTH_ALGO_SHA3_224_HMAC:
+	case CCP_AUTH_ALGO_SHA3_256_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 3;
+		break;
+	case CCP_AUTH_ALGO_SHA3_384_HMAC:
+	case CCP_AUTH_ALGO_SHA3_512_HMAC:
+		result = ccp_perform_sha3_hmac(op, cmd_q);
+		b_info->desccnt += 4;
+		break;
 	case CCP_AUTH_ALGO_AES_CMAC:
 		result = ccp_perform_aes_cmac(op, cmd_q);
 		b_info->desccnt += 4;
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index e4b6445..23b0486 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -54,15 +54,19 @@
 
 #define SHA224_DIGEST_SIZE      28
 #define SHA224_BLOCK_SIZE       64
+#define SHA3_224_BLOCK_SIZE     144
 
 #define SHA256_DIGEST_SIZE      32
 #define SHA256_BLOCK_SIZE       64
+#define SHA3_256_BLOCK_SIZE     136
 
 #define SHA384_DIGEST_SIZE      48
 #define SHA384_BLOCK_SIZE       128
+#define SHA3_384_BLOCK_SIZE	104
 
 #define SHA512_DIGEST_SIZE      64
 #define SHA512_BLOCK_SIZE       128
+#define SHA3_512_BLOCK_SIZE     72
 
 /* SHA LSB intialiazation values */
 
@@ -360,4 +364,22 @@ int process_ops_to_dequeue(struct ccp_qp *qp,
 			   struct rte_crypto_op **op,
 			   uint16_t nb_ops);
 
+
+/**
+ * APIs for SHA3 partial hash generation
+ * @param data_in buffer over which the partial hash is computed
+ * @param data_out buffer receiving the partial hash result in CCP BE format
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+			  uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+			  uint8_t *data_out);
+
 #endif /* _CCP_CRYPTO_H_ */
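
/* Illustrative sketch, not part of the patch: the SHA3_*_BLOCK_SIZE
 * values defined above follow from the sponge construction. The Keccak
 * state is 200 bytes (SHA3_KECCAK_SPONGE_WORDS * 8), and the rate is
 * what remains after reserving a capacity of twice the digest size.
 */
#include <assert.h>

static int
sha3_rate_bytes(int digest_size)
{
	return 200 - 2 * digest_size;	/* rate = state - capacity */
}

int main(void)
{
	assert(sha3_rate_bytes(28) == 144);	/* SHA3_224_BLOCK_SIZE */
	assert(sha3_rate_bytes(32) == 136);	/* SHA3_256_BLOCK_SIZE */
	assert(sha3_rate_bytes(48) == 104);	/* SHA3_384_BLOCK_SIZE */
	assert(sha3_rate_bytes(64) == 72);	/* SHA3_512_BLOCK_SIZE */
	return 0;
}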
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index c5fd3ed..24ad961 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -97,6 +97,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+				 .block_size = 144,
+				 .key_size = {
+					 .min = 1,
+					 .max = 144,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 28,
+					 .max = 28,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA256 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -139,6 +181,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-256-HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+				 .block_size = 136,
+				 .key_size = {
+					 .min = 1,
+					 .max = 136,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 32,
+					 .max = 32,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA384 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -181,6 +265,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-384-HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+				 .block_size = 104,
+				 .key_size = {
+					 .min = 1,
+					 .max = 104,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 48,
+					 .max = 48,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
 	{	/* SHA512 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -223,6 +349,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
 			 }, }
 		}, }
 	},
+	{	/* SHA3-512 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 0,
+					 .max = 0,
+					 .increment = 0
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			 }, }
+		}, }
+	},
+	{	/* SHA3-512-HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+				 .block_size = 72,
+				 .key_size = {
+					 .min = 1,
+					 .max = 72,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 64,
+					 .max = 64,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
 	{	/* AES-CMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
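
As a usage note, an application can discover the new SHA3 entries
through the standard capability query. A minimal sketch, assuming an
already-probed device id dev_id (the helper name is illustrative):

	#include <rte_cryptodev.h>

	static int
	dev_supports_sha3_256(uint8_t dev_id)
	{
		struct rte_cryptodev_sym_capability_idx idx = {
			.type = RTE_CRYPTO_SYM_XFORM_AUTH,
			.algo.auth = RTE_CRYPTO_AUTH_SHA3_256,
		};
		const struct rte_cryptodev_symmetric_capability *cap;

		cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
		/* plain hash: no key, 32-byte digest, no IV */
		return cap != NULL &&
			rte_cryptodev_sym_capability_check_auth(cap, 0,
								32, 0) == 0;
	}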
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 60797e9..eb5afc5 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -245,6 +245,23 @@ enum rte_crypto_auth_algorithm {
 	RTE_CRYPTO_AUTH_ZUC_EIA3,
 	/**< ZUC algorithm in EIA3 mode */
 
+	RTE_CRYPTO_AUTH_SHA3_224,
+	/**< 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	/**< HMAC using 224 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256,
+	/**< 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	/**< HMAC using 256 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384,
+	/**< 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	/**< HMAC using 384 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512,
+	/**< 512 bit SHA3 algorithm. */
+	RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+	/**< HMAC using 512 bit SHA3 algorithm. */
+
 	RTE_CRYPTO_AUTH_LIST_END
 };
 
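With the new enum values in place, requesting e.g. SHA3-256 digest
generation follows the usual xform setup; a sketch (sizes match the
capability table above):

	struct rte_crypto_sym_xform auth_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA3_256,
			.digest_length = 32,
			/* key is unused for the plain (non-HMAC) hash */
		},
	};
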
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 family authentication algo
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (14 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 16/19] crypto/ccp: support sha3 " Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-04-22 20:08           ` Thomas Monjalon
  2018-03-19 12:23         ` [PATCH v5 18/19] test/crypto: add test for AMD CCP crypto poll mode Ravi Kumar
                           ` (3 subsequent siblings)
  19 siblings, 1 reply; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 config/common_base                   |   1 +
 drivers/crypto/ccp/ccp_crypto.c      | 282 ++++++++++++++++++++++++++++++++++-
 drivers/crypto/ccp/ccp_crypto.h      |   5 +-
 drivers/crypto/ccp/ccp_pmd_ops.c     |  23 +++
 drivers/crypto/ccp/ccp_pmd_private.h |  10 ++
 5 files changed, 316 insertions(+), 5 deletions(-)

diff --git a/config/common_base b/config/common_base
index 28237f0..65e34ae 100644
--- a/config/common_base
+++ b/config/common_base
@@ -532,6 +532,7 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 # Compile PMD for AMD CCP crypto device
 #
 CONFIG_RTE_LIBRTE_PMD_CCP=n
+CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
 
 #
 # Compile PMD for Marvell Crypto device
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 53e731b..a0809e4 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -27,6 +27,12 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+#endif
+
 /* SHA initial context values */
 static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	SHA1_H4, SHA1_H3,
@@ -760,6 +766,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 	else
 		sess->auth.op = CCP_AUTH_OP_VERIFY;
 	switch (auth_xform->algo) {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - MD5_DIGEST_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		sess->auth.block_size = MD5_BLOCK_SIZE;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+		break;
+#endif
 	case RTE_CRYPTO_AUTH_SHA1:
 		sess->auth.engine = CCP_ENGINE_SHA;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA1;
@@ -769,6 +786,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		sess->auth.block_size = SHA1_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA1_BLOCK_SIZE)
 			return -1;
 		sess->auth.engine = CCP_ENGINE_SHA;
@@ -784,6 +812,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA224:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA224;
@@ -794,6 +823,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		sess->auth.block_size = SHA224_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA224_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
@@ -809,6 +849,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_224:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
@@ -843,6 +884,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		sess->auth.block_size = SHA256_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA256_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
@@ -858,6 +910,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_256:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
@@ -892,6 +945,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		sess->auth.block_size = SHA384_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA384_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
@@ -907,6 +971,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_384:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
@@ -941,6 +1006,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+			return -1;
+		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		sess->auth.block_size = SHA512_BLOCK_SIZE;
+		sess->auth.key_length = auth_xform->key.length;
+		memset(sess->auth.key, 0, sess->auth.block_size);
+		rte_memcpy(sess->auth.key, auth_xform->key.data,
+			   auth_xform->key.length);
+#else
 		if (auth_xform->key.length > SHA512_BLOCK_SIZE)
 			return -1;
 		sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
@@ -956,6 +1032,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 			   auth_xform->key.length);
 		if (generate_partial_hash(sess))
 			return -1;
+#endif
 		break;
 	case RTE_CRYPTO_AUTH_SHA3_512:
 		sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
@@ -986,7 +1063,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.engine = CCP_ENGINE_AES;
 		sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
 		sess->auth.key_length = auth_xform->key.length;
-		/**<padding and hash result*/
+		/* padding and hash result */
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = AES_BLOCK_SIZE;
 		sess->auth.block_size = AES_BLOCK_SIZE;
@@ -1182,14 +1259,22 @@ ccp_auth_slot(struct ccp_session *session)
 		count = 3;
 		/**< op + lsb passthrough copy to/from */
 		break;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		break;
+#endif
 	case CCP_AUTH_ALGO_SHA1_HMAC:
 	case CCP_AUTH_ALGO_SHA224_HMAC:
 	case CCP_AUTH_ALGO_SHA256_HMAC:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 		count = 6;
+#endif
 		break;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 	case CCP_AUTH_ALGO_SHA512_HMAC:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
 		count = 7;
+#endif
 		/**
 		 * 1. Load PHash1 = H(k ^ ipad); to LSB
 		 * 2. generate IHash = H(hash on message with PHash1
@@ -1296,6 +1381,122 @@ ccp_compute_slot_count(struct ccp_session *session)
 	return count;
 }
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+static int
+algo_select(int sessalgo,
+	    const EVP_MD **algo)
+{
+	int res = 0;
+
+	switch (sessalgo) {
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		*algo = EVP_md5();
+		break;
+	case CCP_AUTH_ALGO_SHA1_HMAC:
+		*algo = EVP_sha1();
+		break;
+	case CCP_AUTH_ALGO_SHA224_HMAC:
+		*algo = EVP_sha224();
+		break;
+	case CCP_AUTH_ALGO_SHA256_HMAC:
+		*algo = EVP_sha256();
+		break;
+	case CCP_AUTH_ALGO_SHA384_HMAC:
+		*algo = EVP_sha384();
+		break;
+	case CCP_AUTH_ALGO_SHA512_HMAC:
+		*algo = EVP_sha512();
+		break;
+	default:
+		res = -EINVAL;
+		break;
+	}
+	return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+		      __rte_unused uint8_t *iv,
+		      EVP_PKEY *pkey,
+		      int srclen,
+		      EVP_MD_CTX *ctx,
+		      const EVP_MD *algo,
+		      uint16_t d_len)
+{
+	size_t dstlen;
+	unsigned char temp_dst[64];
+
+	if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+		goto process_auth_err;
+
+	if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+		goto process_auth_err;
+
+	memcpy(dst, temp_dst, d_len);
+	return 0;
+process_auth_err:
+	CCP_LOG_ERR("Process cpu auth failed");
+	return -EINVAL;
+}
+
+static int
+cpu_crypto_auth(struct ccp_qp *qp,
+			   struct rte_crypto_op *op,
+			   struct ccp_session *sess,
+			   EVP_MD_CTX *ctx)
+{
+	uint8_t *src, *dst;
+	int srclen, status;
+	struct rte_mbuf *mbuf_src, *mbuf_dst;
+	const EVP_MD *algo = NULL;
+	EVP_PKEY *pkey;
+
+	if (algo_select(sess->auth.algo, &algo))
+		return -EINVAL;
+	pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+				    sess->auth.key_length);
+	mbuf_src = op->sym->m_src;
+	mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+	srclen = op->sym->auth.data.length;
+	src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+				      op->sym->auth.data.offset);
+
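+	/* For verify, compute into the qp scratch buffer so the result can
+	 * be compared below against the digest supplied by the application.
+	 */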
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+		dst = qp->temp_digest;
+	} else {
+		dst = op->sym->auth.digest.data;
+		if (dst == NULL) {
+			dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+						op->sym->auth.data.offset +
+						op->sym->auth.data.length);
+		}
+	}
+	status = process_cpu_auth_hmac(src, dst, NULL,
+				       pkey, srclen,
+				       ctx,
+				       algo,
+				       sess->auth.digest_length);
+	if (status) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		EVP_PKEY_free(pkey);
+		return status;
+	}
+
+	if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+		if (memcmp(dst, op->sym->auth.digest.data,
+			   sess->auth.digest_length) != 0) {
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+		} else {
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		}
+	} else {
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	}
+	EVP_PKEY_free(pkey);
+	return 0;
+}
+#endif
+
 static void
 ccp_perform_passthru(struct ccp_passthru *pst,
 		     struct ccp_queue *cmd_q)
@@ -2391,14 +2592,24 @@ ccp_crypto_auth(struct rte_crypto_op *op,
 		result = ccp_perform_sha(op, cmd_q);
 		b_info->desccnt += 3;
 		break;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	case CCP_AUTH_ALGO_MD5_HMAC:
+		break;
+#endif
 	case CCP_AUTH_ALGO_SHA1_HMAC:
 	case CCP_AUTH_ALGO_SHA224_HMAC:
 	case CCP_AUTH_ALGO_SHA256_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		break;
+#endif
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 6;
 		break;
 	case CCP_AUTH_ALGO_SHA384_HMAC:
 	case CCP_AUTH_ALGO_SHA512_HMAC:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+		break;
+#endif
 		result = ccp_perform_hmac(op, cmd_q);
 		b_info->desccnt += 7;
 		break;
@@ -2462,7 +2673,7 @@ ccp_crypto_aead(struct rte_crypto_op *op,
 }
 
 int
-process_ops_to_enqueue(const struct ccp_qp *qp,
+process_ops_to_enqueue(struct ccp_qp *qp,
 		       struct rte_crypto_op **op,
 		       struct ccp_queue *cmd_q,
 		       uint16_t nb_ops,
@@ -2471,11 +2682,22 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 	int i, result = 0;
 	struct ccp_batch_info *b_info;
 	struct ccp_session *session;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+#endif
 
 	if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
 		CCP_LOG_ERR("batch info allocation failed");
 		return 0;
 	}
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		return 0;
+	}
+	b_info->auth_ctr = 0;
+#endif
 	/* populate batch info necessary for dequeue */
 	b_info->op_idx = 0;
 	b_info->lsb_buf_idx = 0;
@@ -2497,6 +2719,11 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 			break;
 		case CCP_CMD_AUTH:
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			b_info->auth_ctr++;
+			result = cpu_crypto_auth(qp, op[i],
+						 session, auth_ctx);
+#endif
 			break;
 		case CCP_CMD_CIPHER_HASH:
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -2506,6 +2733,12 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 			break;
 		case CCP_CMD_HASH_CIPHER:
 			result = ccp_crypto_auth(op[i], cmd_q, b_info);
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			result = cpu_crypto_auth(qp, op[i],
+						 session, auth_ctx);
+			if (op[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+				continue;
+#endif
 			if (result)
 				break;
 			result = ccp_crypto_cipher(op[i], cmd_q, b_info);
@@ -2539,6 +2772,9 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
 
 	rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
 	return i;
 }
 
@@ -2607,13 +2843,27 @@ static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
 }
 
 static int
-ccp_prepare_ops(struct rte_crypto_op **op_d,
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ccp_prepare_ops(struct ccp_qp *qp,
+#else
+ccp_prepare_ops(struct ccp_qp *qp __rte_unused,
+#endif
+		struct rte_crypto_op **op_d,
 		struct ccp_batch_info *b_info,
 		uint16_t nb_ops)
 {
 	int i, min_ops;
 	struct ccp_session *session;
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX *auth_ctx = NULL;
+
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		return 0;
+	}
+#endif
 	min_ops = RTE_MIN(nb_ops, b_info->opcnt);
 
 	for (i = 0; i < min_ops; i++) {
@@ -2626,8 +2876,25 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 			break;
 		case CCP_CMD_AUTH:
+#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_CIPHER_HASH:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			cpu_crypto_auth(qp, op_d[i],
+					session, auth_ctx);
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_HASH_CIPHER:
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+			op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#else
+			ccp_auth_dq_prepare(op_d[i]);
+#endif
+			break;
 		case CCP_CMD_COMBINED:
 			ccp_auth_dq_prepare(op_d[i]);
 			break;
@@ -2636,6 +2903,9 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
 		}
 	}
 
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	EVP_MD_CTX_destroy(auth_ctx);
+#endif
 	b_info->opcnt -= min_ops;
 	return min_ops;
 }
@@ -2655,6 +2925,10 @@ process_ops_to_dequeue(struct ccp_qp *qp,
 	} else if (rte_ring_dequeue(qp->processed_pkts,
 				    (void **)&b_info))
 		return 0;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	if (b_info->auth_ctr == b_info->opcnt)
+		goto success;
+#endif
 	cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
 				       CMD_Q_HEAD_LO_BASE);
 
@@ -2674,7 +2948,7 @@ process_ops_to_dequeue(struct ccp_qp *qp,
 
 
 success:
-	nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
+	nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
 	rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
 	b_info->desccnt = 0;
 	if (b_info->opcnt > 0) {
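
The CPU-auth path above uses the one-shot EVP digest-sign flow. For
cross-checking a digest on the host, a self-contained equivalent using
the same OpenSSL calls (assumes OpenSSL 1.0.2 or later; key and message
are arbitrary test data):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <openssl/evp.h>

	int
	main(void)
	{
		static const uint8_t key[20] = { 0x0b };
		static const char msg[] = "Hi There";
		uint8_t digest[EVP_MAX_MD_SIZE];
		size_t dlen = sizeof(digest);
		EVP_MD_CTX *ctx = EVP_MD_CTX_create();
		EVP_PKEY *pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL,
						      key, sizeof(key));

		if (!ctx || !pkey ||
		    EVP_DigestSignInit(ctx, NULL, EVP_sha1(), NULL,
				       pkey) <= 0 ||
		    EVP_DigestSignUpdate(ctx, msg, strlen(msg)) <= 0 ||
		    EVP_DigestSignFinal(ctx, digest, &dlen) <= 0)
			return 1;
		printf("HMAC-SHA1: %zu bytes\n", dlen);
		EVP_PKEY_free(pkey);
		EVP_MD_CTX_destroy(ctx);
		return 0;
	}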
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 23b0486..dd89ad9 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -68,6 +68,9 @@
 #define SHA512_BLOCK_SIZE       128
 #define SHA3_512_BLOCK_SIZE     72
 
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX	64
+
 /* SHA LSB initialization values */
 
 #define SHA1_H0		0x67452301UL
@@ -346,7 +349,7 @@ int ccp_compute_slot_count(struct ccp_session *session);
  * @param nb_ops No. of ops to be submitted
  * @return 0 on success otherwise -1
  */
-int process_ops_to_enqueue(const struct ccp_qp *qp,
+int process_ops_to_enqueue(struct ccp_qp *qp,
 			   struct rte_crypto_op **op,
 			   struct ccp_queue *cmd_q,
 			   uint16_t nb_ops,
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
index 24ad961..3d0d875 100644
--- a/drivers/crypto/ccp/ccp_pmd_ops.c
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -13,6 +13,29 @@
 #include "ccp_crypto.h"
 
 static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				 .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				 .block_size = 64,
+				 .key_size = {
+					 .min = 1,
+					 .max = 64,
+					 .increment = 1
+				 },
+				 .digest_size = {
+					 .min = 16,
+					 .max = 16,
+					 .increment = 0
+				 },
+				 .aad_size = { 0 }
+			}, }
+		}, }
+	},
+#endif
 	{	/* SHA1 */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
index 47c4fb2..560bed9 100644
--- a/drivers/crypto/ccp/ccp_pmd_private.h
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -6,6 +6,7 @@
 #define _CCP_PMD_PRIVATE_H_
 
 #include <rte_cryptodev.h>
+#include "ccp_crypto.h"
 
 #define CRYPTODEV_NAME_CCP_PMD crypto_ccp
 
@@ -61,6 +62,10 @@ struct ccp_batch_info {
 	phys_addr_t lsb_buf_phys;
 	/**< LSB intermediate buf for passthru */
 	int lsb_buf_idx;
+#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+	uint16_t auth_ctr;
+	/**< auth only ops batch */
+#endif
 } __rte_cache_aligned;
 
 /**< CCP crypto queue pair */
@@ -81,6 +86,11 @@ struct ccp_qp {
 	/**< Store ops pulled out of queue */
 	struct rte_cryptodev *dev;
 	/**< rte crypto device to which this qp belongs */
+	uint8_t temp_digest[DIGEST_LENGTH_MAX];
+	/**< Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
 } __rte_cache_aligned;
 
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v5 18/19] test/crypto: add test for AMD CCP crypto poll mode
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (15 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 " Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-19 12:23         ` [PATCH v5 19/19] doc: add document for AMD CCP crypto poll mode driver Ravi Kumar
                           ` (2 subsequent siblings)
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 test/test/test_cryptodev.c                   | 161 +++++++++++++++++++++++++++
 test/test/test_cryptodev.h                   |   1 +
 test/test/test_cryptodev_aes_test_vectors.h  |  93 ++++++++++------
 test/test/test_cryptodev_blockcipher.c       |   9 +-
 test/test/test_cryptodev_blockcipher.h       |   1 +
 test/test/test_cryptodev_des_test_vectors.h  |  42 ++++---
 test/test/test_cryptodev_hash_test_vectors.h |  60 ++++++----
 7 files changed, 301 insertions(+), 66 deletions(-)

diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 1417482..d1d7925 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -338,6 +338,23 @@ testsuite_setup(void)
 		}
 	}
 
+	/* Create a CCP device if required */
+	if (gbl_driver_id == rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD))) {
+		nb_devs = rte_cryptodev_device_count_by_driver(
+				rte_cryptodev_driver_id_get(
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD)));
+		if (nb_devs < 1) {
+			ret = rte_vdev_init(
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD),
+				NULL);
+
+			TEST_ASSERT(ret == 0, "Failed to create "
+				"instance of pmd : %s",
+				RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+		}
+	}
+
 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
 	if (gbl_driver_id == rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
@@ -1727,6 +1744,44 @@ test_AES_cipheronly_openssl_all(void)
 }
 
 static int
+test_AES_chain_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AES_CHAIN_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_AES_cipheronly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_qat_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1898,6 +1953,25 @@ test_authonly_openssl_all(void)
 }
 
 static int
+test_authonly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_AUTHONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_AES_chain_armv8_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -4973,6 +5047,44 @@ test_3DES_cipheronly_dpaa2_sec_all(void)
 }
 
 static int
+test_3DES_chain_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_3DES_CHAIN_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_3DES_cipheronly_ccp_all(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	int status;
+
+	status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+		ts_params->op_mpool,
+		ts_params->session_mpool,
+		ts_params->valid_devs[0],
+		rte_cryptodev_driver_id_get(
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+		BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return TEST_SUCCESS;
+}
+
+static int
 test_3DES_cipheronly_qat_all(void)
 {
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -9646,6 +9758,38 @@ static struct unit_test_suite cryptodev_mrvl_testsuite  = {
 	}
 };
 
+static struct unit_test_suite cryptodev_ccp_testsuite  = {
+	.suite_name = "Crypto Device CCP Unit Test Suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		TEST_CASE_ST(ut_setup, ut_teardown, test_multi_session),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_multi_session_random_usage),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_AES_chain_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_AES_cipheronly_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_3DES_chain_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_3DES_cipheronly_ccp_all),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+				test_authonly_ccp_all),
+
+		/** Negative tests */
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			authentication_verify_HMAC_SHA1_fail_data_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			authentication_verify_HMAC_SHA1_fail_tag_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			auth_decryption_AES128CBC_HMAC_SHA1_fail_data_corrupt),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
 
 static int
 test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
@@ -9867,6 +10011,22 @@ test_cryptodev_dpaa_sec(void /*argv __rte_unused, int argc __rte_unused*/)
 	return unit_test_suite_runner(&cryptodev_dpaa_sec_testsuite);
 }
 
+static int
+test_cryptodev_ccp(void)
+{
+	gbl_driver_id = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+
+	if (gbl_driver_id == -1) {
+		RTE_LOG(ERR, USER1, "CCP PMD must be loaded. Check if "
+				"CONFIG_RTE_LIBRTE_PMD_CCP is enabled "
+				"in config file to run this testsuite.\n");
+		return TEST_FAILED;
+	}
+
+	return unit_test_suite_runner(&cryptodev_ccp_testsuite);
+}
+
 REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
 REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
 REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
@@ -9879,3 +10039,4 @@ REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8);
 REGISTER_TEST_COMMAND(cryptodev_sw_mrvl_autotest, test_cryptodev_mrvl);
 REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec);
 REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec);
+REGISTER_TEST_COMMAND(cryptodev_ccp_autotest, test_cryptodev_ccp);
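
A quick way to instantiate the PMD from an application, mirroring the
testsuite_setup() hunk above (sketch; vdev header location per DPDK
17.11 and later):

	#include <rte_bus_vdev.h>
	#include <rte_cryptodev.h>

	static int
	create_ccp_vdev(void)
	{
		int drv_id = rte_cryptodev_driver_id_get("crypto_ccp");

		if (rte_cryptodev_device_count_by_driver(drv_id) > 0)
			return 0; /* a device was already probed */
		return rte_vdev_init("crypto_ccp", NULL);
	}

After EAL init, the suite registered above is then reachable from the
test application prompt as cryptodev_ccp_autotest.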
diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
index 8cdc087..d45fb7b 100644
--- a/test/test/test_cryptodev.h
+++ b/test/test/test_cryptodev.h
@@ -61,6 +61,7 @@
 #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
 #define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
 #define CRYPTODEV_NAME_MRVL_PMD		crypto_mrvl
+#define CRYPTODEV_NAME_CCP_PMD		crypto_ccp
 
 /**
  * Write (spread) data from buffer to mbuf data
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 3577ef4..6f2422a 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1171,7 +1171,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1184,7 +1185,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1223,7 +1225,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1236,7 +1239,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1249,7 +1253,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1285,7 +1290,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1315,7 +1321,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
@@ -1337,7 +1344,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1357,7 +1365,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1366,7 +1375,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1390,7 +1400,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1455,7 +1466,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1467,7 +1479,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1479,7 +1492,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1492,7 +1506,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1501,7 +1516,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr =
@@ -1511,7 +1527,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
-			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+			BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
@@ -1526,7 +1543,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CBC Decryption",
@@ -1538,7 +1556,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption",
@@ -1549,7 +1568,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Encryption Scatter Gather",
@@ -1570,7 +1590,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CBC Decryption Scatter Gather",
@@ -1590,7 +1611,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC Decryption",
@@ -1602,7 +1624,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Encryption",
@@ -1612,7 +1635,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CBC OOP Decryption",
@@ -1622,7 +1646,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Encryption",
@@ -1634,7 +1659,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Decryption",
@@ -1646,7 +1672,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR Encryption",
@@ -1657,7 +1684,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-192-CTR Decryption",
@@ -1668,7 +1696,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_MB |
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR Encryption",
@@ -1680,7 +1709,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-256-CTR Decryption",
@@ -1692,7 +1722,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "AES-128-CTR Encryption (12-byte IV)",
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index ed06618..5835b3e 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -54,6 +54,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 
 	int openssl_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+	int ccp_pmd = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
 	int scheduler_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
 	int armv8_pmd = rte_cryptodev_driver_id_get(
@@ -94,7 +96,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
 			driver_id == qat_pmd ||
 			driver_id == openssl_pmd ||
 			driver_id == armv8_pmd ||
-			driver_id == mrvl_pmd) { /* Fall through */
+			driver_id == mrvl_pmd ||
+			driver_id == ccp_pmd) { /* Fall through */
 		digest_len = tdata->digest.len;
 	} else if (driver_id == aesni_mb_pmd ||
 			driver_id == scheduler_pmd) {
@@ -555,6 +558,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
 
 	int openssl_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+	int ccp_pmd = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_CCP_PMD));
 	int dpaa2_sec_pmd = rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
 	int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
@@ -627,6 +632,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER;
 	else if (driver_id == dpaa2_sec_pmd)
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC;
+	else if (driver_id == ccp_pmd)
+		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_CCP;
 	else if (driver_id == dpaa_sec_pmd)
 		target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC;
 	else if (driver_id == mrvl_pmd)
diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
index edbdaab..93ef0ae 100644
--- a/test/test/test_cryptodev_blockcipher.h
+++ b/test/test/test_cryptodev_blockcipher.h
@@ -27,6 +27,7 @@
 #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC	0x0020 /* DPAA2_SEC flag */
 #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC	0x0040 /* DPAA_SEC flag */
 #define BLOCKCIPHER_TEST_TARGET_PMD_MRVL	0x0080 /* Marvell flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_CCP		0x0100 /* CCP flag */
 
 #define BLOCKCIPHER_TEST_OP_CIPHER	(BLOCKCIPHER_TEST_OP_ENCRYPT | \
 					BLOCKCIPHER_TEST_OP_DECRYPT)
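
Each PMD must own a distinct bit in these masks (hence 0x0100 for CCP,
the next free bit after MRVL) because the harness selects cases by
masking. A sketch of the check test_blockcipher_all_tests() effectively
performs per case:

	/* run a case only when the target driver's bit is set */
	static int
	case_enabled(uint32_t target_pmd_mask, uint32_t case_pmd_mask)
	{
		return (target_pmd_mask & case_pmd_mask) != 0;
	}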
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index 0be809e..a30317c 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -1044,7 +1044,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1053,19 +1054,22 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
 		.test_data = &triple_des128cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC SHA1 Decryption Digest Verify",
 		.test_data = &triple_des128cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Encryption Digest",
@@ -1075,7 +1079,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1085,21 +1090,24 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
 		.test_data = &triple_des192cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC SHA1 Decryption Digest Verify",
 		.test_data = &triple_des192cbc_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CTR HMAC-SHA1 Encryption Digest",
@@ -1180,7 +1188,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr =
@@ -1189,7 +1198,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
 		.test_data = &triple_des128cbc_hmac_sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
 		.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
@@ -1201,7 +1211,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CBC Decryption",
@@ -1210,7 +1221,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC Encryption",
@@ -1220,7 +1232,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-192-CBC Decryption",
@@ -1230,7 +1243,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+			BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 93dacb7..6b882ae 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -358,13 +358,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.test_descr = "SHA1 Digest",
 		.test_data = &sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA1 Digest Verify",
 		.test_data = &sha1_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest",
@@ -375,7 +377,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Scatter Gather",
@@ -394,7 +397,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA1 Digest Verify Scatter Gather",
@@ -408,13 +412,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 		.test_descr = "SHA224 Digest",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			    BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA224 Digest Verify",
 		.test_data = &sha224_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			    BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest",
@@ -425,7 +431,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA224 Digest Verify",
@@ -436,19 +443,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA256 Digest",
 		.test_data = &sha256_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA256 Digest Verify",
 		.test_data = &sha256_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest",
@@ -459,7 +469,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA256 Digest Verify",
@@ -470,19 +481,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA384 Digest",
 		.test_data = &sha384_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA384 Digest Verify",
 		.test_data = &sha384_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest",
@@ -493,7 +507,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA384 Digest Verify",
@@ -504,19 +519,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA512 Digest",
 		.test_data = &sha512_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "SHA512 Digest Verify",
 		.test_data = &sha512_test_vector,
 		.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
-		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+		.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest",
@@ -527,7 +545,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 	{
 		.test_descr = "HMAC-SHA512 Digest Verify",
@@ -538,7 +557,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
 			BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
 			BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
-			BLOCKCIPHER_TEST_TARGET_PMD_QAT
+			BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+			BLOCKCIPHER_TEST_TARGET_PMD_CCP
 	},
 };
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* [PATCH v5 19/19] doc: add document for AMD CCP crypto poll mode driver
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (16 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 18/19] test/crypto: add test for AMD CCP crypto poll mode Ravi Kumar
@ 2018-03-19 12:23         ` Ravi Kumar
  2018-03-30 22:46         ` [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD De Lara Guarch, Pablo
  2018-04-22 19:57         ` Thomas Monjalon
  19 siblings, 0 replies; 131+ messages in thread
From: Ravi Kumar @ 2018-03-19 12:23 UTC (permalink / raw)
  To: dev; +Cc: pablo.de.lara.guarch, hemant.agrawal

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 doc/guides/cryptodevs/ccp.rst              | 102 +++++++++++++++++++++++++++++
 doc/guides/cryptodevs/features/ccp.ini     |  57 ++++++++++++++++
 doc/guides/cryptodevs/features/default.ini |  12 ++++
 doc/guides/cryptodevs/index.rst            |   1 +
 4 files changed, 172 insertions(+)
 create mode 100644 doc/guides/cryptodevs/ccp.rst
 create mode 100644 doc/guides/cryptodevs/features/ccp.ini

diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
new file mode 100644
index 0000000..1fcd462
--- /dev/null
+++ b/doc/guides/cryptodevs/ccp.rst
@@ -0,0 +1,102 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+AMD CCP Poll Mode Driver
+========================
+
+This code provides the initial implementation of the CCP poll mode driver.
+The CCP poll mode driver library (librte_pmd_ccp) implements support for
+AMD’s cryptographic co-processor (CCP). The CCP PMD is a virtual crypto
+poll mode driver which schedules crypto operations to one or more available
+CCP hardware engines on the platform. The CCP PMD provides poll mode crypto
+driver support for the following hardware accelerator devices::
+
+	AMD Cryptographic Co-processor (0x1456)
+	AMD Cryptographic Co-processor (0x1468)
+
+Features
+--------
+
+The CCP crypto PMD supports:
+
+Cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES_ECB``
+* ``RTE_CRYPTO_CIPHER_AES_CTR``
+* ``RTE_CRYPTO_CIPHER_3DES_CBC``
+
+Hash algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1``
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA384``
+* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA512``
+* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+* ``RTE_CRYPTO_AUTH_AES_CMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_224``
+* ``RTE_CRYPTO_AUTH_SHA3_224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_256``
+* ``RTE_CRYPTO_AUTH_SHA3_256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_384``
+* ``RTE_CRYPTO_AUTH_SHA3_384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_512``
+* ``RTE_CRYPTO_AUTH_SHA3_512_HMAC``
+
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
+Installation
+------------
+
+To compile the CCP PMD, it has to be enabled in the config/common_base file:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP=y``
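+
+A possible sequence with the legacy make build system (an illustrative
+example; any equivalent way of setting the option works)::
+
+	sed -i 's/CONFIG_RTE_LIBRTE_PMD_CCP=n/CONFIG_RTE_LIBRTE_PMD_CCP=y/' config/common_base
+	make config T=x86_64-native-linuxapp-gcc
+	make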
+
+The CCP PMD also supports computing authentication on the CPU while the
+cipher is offloaded to the CCP. To enable this feature, set the following
+in the configuration:
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y``
+
+This code was verified on Ubuntu 16.04.
+
+Initialization
+--------------
+
+Bind the CCP devices to the DPDK UIO driver module before running the CCP PMD.
+For example, for the 0x1456 device::
+
+	cd to the top-level DPDK directory
+	modprobe uio
+	insmod ./build/kmod/igb_uio.ko
+	echo "1022 1456" > /sys/bus/pci/drivers/igb_uio/new_id
+
+Another way to bind the CCP devices to the DPDK UIO driver is by using the
+``dpdk-devbind.py`` script. The following command assumes a BDF
+(bus:device.function) of ``0000:09:00.2``::
+
+	cd to the top-level DPDK directory
+	./usertools/dpdk-devbind.py -b igb_uio 0000:09:00.2
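+
+The result of the binding can be checked with::
+
+	./usertools/dpdk-devbind.py --status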
+
+To verify with real traffic, the l2fwd-crypto example can be used with the
+following command:
+
+.. code-block:: console
+
+	sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp" -- -p 0x1 \
+	--chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo aes-cbc \
+	--cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f \
+	--iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff \
+	--auth_op GENERATE --auth_algo sha1-hmac \
+	--auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+
+Limitations
+-----------
+
+* Chained mbufs are not supported.
+* MD5_HMAC is supported only if ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y`` is
+  enabled in the configuration.
diff --git a/doc/guides/cryptodevs/features/ccp.ini b/doc/guides/cryptodevs/features/ccp.ini
new file mode 100644
index 0000000..add4bd8
--- /dev/null
+++ b/doc/guides/cryptodevs/features/ccp.ini
@@ -0,0 +1,57 @@
+;
+; Supported features of the 'ccp' crypto poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto       = Y
+Sym operation chaining = Y
+HW Accelerated         = Y
+
+;
+; Supported crypto algorithms of the 'ccp' crypto driver.
+;
+[Cipher]
+AES CBC (128)  = Y
+AES CBC (192)  = Y
+AES CBC (256)  = Y
+AES ECB (128)  = Y
+AES ECB (192)  = Y
+AES ECB (256)  = Y
+AES CTR (128)  = Y
+AES CTR (192)  = Y
+AES CTR (256)  = Y
+3DES CBC       = Y
+
+;
+; Supported authentication algorithms of the 'ccp' crypto driver.
+;
+[Auth]
+MD5 HMAC       = Y
+SHA1           = Y
+SHA1 HMAC      = Y
+SHA224         = Y
+SHA224 HMAC    = Y
+SHA256         = Y
+SHA256 HMAC    = Y
+SHA384         = Y
+SHA384 HMAC    = Y
+SHA512         = Y
+SHA512 HMAC    = Y
+AES CMAC       = Y
+SHA3_224       = Y
+SHA3_224 HMAC  = Y
+SHA3_256       = Y
+SHA3_256 HMAC  = Y
+SHA3_384       = Y
+SHA3_384 HMAC  = Y
+SHA3_512       = Y
+SHA3_512 HMAC  = Y
+
+;
+; Supported AEAD algorithms of the 'ccp' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 728ce3b..aa1ca31 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -28,6 +28,9 @@ NULL           =
 AES CBC (128)  =
 AES CBC (192)  =
 AES CBC (256)  =
+AES ECB (128)  =
+AES ECB (192)  =
+AES ECB (256)  =
 AES CTR (128)  =
 AES CTR (192)  =
 AES CTR (256)  =
@@ -62,6 +65,15 @@ AES GMAC     =
 SNOW3G UIA2  =
 KASUMI F9    =
 ZUC EIA3     =
+AES CMAC     =
+SHA3_224       =
+SHA3_224 HMAC  =
+SHA3_256       =
+SHA3_256 HMAC  =
+SHA3_384       =
+SHA3_384 HMAC  =
+SHA3_512       =
+SHA3_512 HMAC  =
 
 ;
 ; Supported AEAD algorithms of a default crypto driver.
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index 558c926..8a921dd 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -13,6 +13,7 @@ Crypto Device Drivers
     aesni_mb
     aesni_gcm
     armv8
+    ccp
     dpaa2_sec
     dpaa_sec
     kasumi
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (17 preceding siblings ...)
  2018-03-19 12:23         ` [PATCH v5 19/19] doc: add document for AMD CCP crypto poll mode driver Ravi Kumar
@ 2018-03-30 22:46         ` De Lara Guarch, Pablo
  2018-04-02  5:50           ` Kumar, Ravi1
  2018-04-22 19:57         ` Thomas Monjalon
  19 siblings, 1 reply; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-03-30 22:46 UTC (permalink / raw)
  To: Ravi Kumar, dev; +Cc: hemant.agrawal



> -----Original Message-----
> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> Sent: Monday, March 19, 2018 12:24 PM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> hemant.agrawal@nxp.com
> Subject: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
> 
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>

Patchset applied to dpdk-next-crypto, with some minor changes
(title changes and a driver registration modification due to an earlier patch).

Thanks for the work!
Pablo

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-03-30 22:46         ` [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD De Lara Guarch, Pablo
@ 2018-04-02  5:50           ` Kumar, Ravi1
  2018-04-16 14:20             ` De Lara Guarch, Pablo
  0 siblings, 1 reply; 131+ messages in thread
From: Kumar, Ravi1 @ 2018-04-02  5:50 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, dev; +Cc: hemant.agrawal

>
>
>> -----Original Message-----
>> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
>> Sent: Monday, March 19, 2018 12:24 PM
>> To: dev@dpdk.org
>> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; 
>> hemant.agrawal@nxp.com
>> Subject: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
>> 
>> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
>
>Patchset applied to dpdk-next-crypto, with some minor changes (title changes and a driver registration modification due to an earlier patch).
>
>Thanks for the work!
>Pablo
>

Thanks a lot Pablo. 

Regards,
Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-04-02  5:50           ` Kumar, Ravi1
@ 2018-04-16 14:20             ` De Lara Guarch, Pablo
  2018-04-17  6:11               ` Kumar, Ravi1
  0 siblings, 1 reply; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-04-16 14:20 UTC (permalink / raw)
  To: Kumar, Ravi1, dev; +Cc: hemant.agrawal

Hi Ravi,

> -----Original Message-----
> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
> Sent: Monday, April 2, 2018 6:50 AM
> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; dev@dpdk.org
> Cc: hemant.agrawal@nxp.com
> Subject: RE: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
> 
> >
> >
> >> -----Original Message-----
> >> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
> >> Sent: Monday, March 19, 2018 12:24 PM
> >> To: dev@dpdk.org
> >> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> >> hemant.agrawal@nxp.com
> >> Subject: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
> >>
> >> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
> >
> >Patchset applied to dpdk-next-crypto, with some minor changes (title changes
> >and a driver registration modification due to an earlier patch).
> >
> >Thanks for the work!
> >Pablo
> >

There's been a memory rework applied in DPDK at the same time I applied your PMD in next-crypto,
which means that it is broken now. Could you submit a fix for it?
At least, compilation is broken now, but it may require more changes:

drivers/crypto/ccp/ccp_dev.c:98:7: error: implicit declaration of function 'rte_eal_get_physmem_layout' is invalid in C99
      [-Werror,-Wimplicit-function-declaration]
        ms = rte_eal_get_physmem_layout();

This function does not exist anymore.
Commit 2d84772bf858 ("crypto/qat: use contiguous allocation for DMA memory")
makes a similar required change in QAT.
Take a look at it and see if it suits you.
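
For illustration only, a minimal sketch of that approach (the function name
and its placement are assumptions, not the actual CCP fix):

#include <rte_memzone.h>

/* Reserve an IOVA-contiguous zone for DMA instead of scanning the
 * removed rte_eal_get_physmem_layout(). */
static void *
ccp_dma_zone_alloc(const char *name, size_t len, int socket_id,
		   rte_iova_t *iova)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_aligned(name, len, socket_id,
					 RTE_MEMZONE_IOVA_CONTIG, 4096);
	if (mz == NULL)
		return NULL;
	*iova = mz->iova;	/* contiguous DMA address of the zone */
	return mz->addr;
}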

> 
> Thanks a lot Pablo.
> 
> Regards,
> Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-04-16 14:20             ` De Lara Guarch, Pablo
@ 2018-04-17  6:11               ` Kumar, Ravi1
  0 siblings, 0 replies; 131+ messages in thread
From: Kumar, Ravi1 @ 2018-04-17  6:11 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, dev; +Cc: hemant.agrawal

>Hi Ravi,
>
>> -----Original Message-----
>> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
>> Sent: Monday, April 2, 2018 6:50 AM
>> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; 
>> dev@dpdk.org
>> Cc: hemant.agrawal@nxp.com
>> Subject: RE: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
>> 
>> >
>> >
>> >> -----Original Message-----
>> >> From: Ravi Kumar [mailto:Ravi1.kumar@amd.com]
>> >> Sent: Monday, March 19, 2018 12:24 PM
>> >> To: dev@dpdk.org
>> >> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; 
>> >> hemant.agrawal@nxp.com
>> >> Subject: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
>> >>
>> >> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
>> >
>> >Patchset applied to dpdk-next-crypto, with same minor changes (title 
>> >changes
>> and driver registering modification due to an earlier patch).
>> >
>> >Thanks for the work!
>> >Pablo
>> >
>
>There's been a memory rework applied in DPDK at the same time I applied your PMD in next-crypto, which means that it is broken now. Could you submit a fix for it?
>At least, compilation is broken now, but it may require more changes:
>
>drivers/crypto/ccp/ccp_dev.c:98:7: error: implicit declaration of function 'rte_eal_get_physmem_layout' is invalid in C99
>      [-Werror,-Wimplicit-function-declaration]
>        ms = rte_eal_get_physmem_layout();
>
>This function does not exist anymore.
>Commit 2d84772bf858 ("crypto/qat: use contiguous allocation for DMA memory") makes a similar required change in QAT.
>Take a look at it and see if it suits you.
>

Hi Pablo,

Sure, we will send updated patch for this.

Regards,
Ravi

>> 
>> Thanks a lot Pablo.
>> 
>> Regards,
>> Ravi
>

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
                           ` (18 preceding siblings ...)
  2018-03-30 22:46         ` [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD De Lara Guarch, Pablo
@ 2018-04-22 19:57         ` Thomas Monjalon
  2018-04-23  6:37           ` Kumar, Ravi1
  19 siblings, 1 reply; 131+ messages in thread
From: Thomas Monjalon @ 2018-04-22 19:57 UTC (permalink / raw)
  To: Ravi Kumar, pablo.de.lara.guarch; +Cc: dev, hemant.agrawal

Hi,

19/03/2018 13:23, Ravi Kumar:
> --- a/config/common_base
> +++ b/config/common_base
> @@ -529,6 +529,11 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
>  CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
>  
>  #
> +# Compile PMD for AMD CCP crypto device
> +#
> +CONFIG_RTE_LIBRTE_PMD_CCP=n

Why is it disabled by default?

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 family authentication algo
  2018-03-19 12:23         ` [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 " Ravi Kumar
@ 2018-04-22 20:08           ` Thomas Monjalon
  2018-04-23  6:41             ` Kumar, Ravi1
  0 siblings, 1 reply; 131+ messages in thread
From: Thomas Monjalon @ 2018-04-22 20:08 UTC (permalink / raw)
  To: Ravi Kumar, pablo.de.lara.guarch; +Cc: dev, hemant.agrawal

Hi,

I am making some late comments because I had a quick look when trying
to pull next-crypto into the master branch.
Unfortunately, it doesn't meet the basic quality criteria.


19/03/2018 13:23, Ravi Kumar:
> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
> ---
>  config/common_base                   |   1 +
>  drivers/crypto/ccp/ccp_crypto.c      | 282 ++++++++++++++++++++++++++++++++++-
>  drivers/crypto/ccp/ccp_crypto.h      |   5 +-
>  drivers/crypto/ccp/ccp_pmd_ops.c     |  23 +++
>  drivers/crypto/ccp/ccp_pmd_private.h |  10 ++
>  5 files changed, 316 insertions(+), 5 deletions(-)
[...]
> +CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n

Why introducing a compile-time option?
Can it be a run-time option of the device?
We must not add compile-time device option if not well justified.

Talking about justification, there is 0 explanation in the commit messages.
But there are some in next-crypto tree. Where do they come from?

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-04-22 19:57         ` Thomas Monjalon
@ 2018-04-23  6:37           ` Kumar, Ravi1
  2018-04-23  7:55             ` De Lara Guarch, Pablo
  2018-04-23  7:57             ` Thomas Monjalon
  0 siblings, 2 replies; 131+ messages in thread
From: Kumar, Ravi1 @ 2018-04-23  6:37 UTC (permalink / raw)
  To: Thomas Monjalon, pablo.de.lara.guarch; +Cc: dev, hemant.agrawal

>Hi,
>
>19/03/2018 13:23, Ravi Kumar:
>> --- a/config/common_base
>> +++ b/config/common_base
>> @@ -529,6 +529,11 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
>>  CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
>>  
>>  #
>> +# Compile PMD for AMD CCP crypto device
>> +#
>> +CONFIG_RTE_LIBRTE_PMD_CCP=n
>
>Why is it disabled by default?
>
Hi Thomas,

The CCP HW crypto engines are available only on specific AMD platforms.
That is why we let the user decide when to enable it.

Regards,
Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 family authentication algo
  2018-04-22 20:08           ` Thomas Monjalon
@ 2018-04-23  6:41             ` Kumar, Ravi1
  2018-04-23  8:05               ` Thomas Monjalon
  0 siblings, 1 reply; 131+ messages in thread
From: Kumar, Ravi1 @ 2018-04-23  6:41 UTC (permalink / raw)
  To: Thomas Monjalon, pablo.de.lara.guarch; +Cc: dev, hemant.agrawal

>Hi,
>
>I am making some late comments because I had a quick look when trying to pull next-crypto into the master branch.
>Unfortunately, it doesn't meet the basic quality criteria.
>
>
>19/03/2018 13:23, Ravi Kumar:
>> Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
>> ---
>>  config/common_base                   |   1 +
>>  drivers/crypto/ccp/ccp_crypto.c      | 282 ++++++++++++++++++++++++++++++++++-
>>  drivers/crypto/ccp/ccp_crypto.h      |   5 +-
>>  drivers/crypto/ccp/ccp_pmd_ops.c     |  23 +++
>>  drivers/crypto/ccp/ccp_pmd_private.h |  10 ++
>>  5 files changed, 316 insertions(+), 5 deletions(-)
>[...]
>> +CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
>
>Why introducing a compile-time option?
>Can it be a run-time option of the device?
>We must not add compile-time device option if not well justified.
>
>Talking about justification, there is 0 explanation in the commit messages.
>But there are some in next-crypto tree. Where do they come from?
>

Hi Thomas,

The detailed commit messages were missing from the earlier patches. Later, after the patches were applied to dpdk-next-crypto, Pablo requested detailed commit messages for them; to avoid populating the mail chain unnecessarily, the messages were squashed into the commits offline.

Here is the explanation of the patch:

By default, all crypto operations (cipher + auth) are offloaded to the CCP engines. When the user enables CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y, the auth operations are not offloaded to the CCP but are instead performed on the CPU. We kept this feature as a compile-time option to let the user decide whether to run auth operations on the CCP or the CPU, as some auth operations perform faster on the CPU than on the CCP.

Regards,
Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-04-23  6:37           ` Kumar, Ravi1
@ 2018-04-23  7:55             ` De Lara Guarch, Pablo
  2018-04-23  7:57             ` Thomas Monjalon
  1 sibling, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-04-23  7:55 UTC (permalink / raw)
  To: Kumar, Ravi1, Thomas Monjalon; +Cc: dev, hemant.agrawal

Hi,

> -----Original Message-----
> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
> Sent: Monday, April 23, 2018 7:37 AM
> To: Thomas Monjalon <thomas@monjalon.net>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>
> Cc: dev@dpdk.org; hemant.agrawal@nxp.com
> Subject: RE: [dpdk-dev] [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton
> PMD
> 
> >Hi,
> >
> >19/03/2018 13:23, Ravi Kumar:
> >> --- a/config/common_base
> >> +++ b/config/common_base
> >> @@ -529,6 +529,11 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
> >>  CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
> >>
> >>  #
> >> +# Compile PMD for AMD CCP crypto device
> >> +#
> >> +CONFIG_RTE_LIBRTE_PMD_CCP=n
> >
> >Why is it disabled by default?
> >
> Hi Thomas,
> 
> The CCP HW crypto engines are available only on specific AMD platforms.
> That is why we let the user decide when to enable it.

Actually, I thought this was disabled because this PMD has an external dependency with libcrypto.

> 
> Regards,
> Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD
  2018-04-23  6:37           ` Kumar, Ravi1
  2018-04-23  7:55             ` De Lara Guarch, Pablo
@ 2018-04-23  7:57             ` Thomas Monjalon
  1 sibling, 0 replies; 131+ messages in thread
From: Thomas Monjalon @ 2018-04-23  7:57 UTC (permalink / raw)
  To: Kumar, Ravi1; +Cc: pablo.de.lara.guarch, dev, hemant.agrawal

23/04/2018 08:37, Kumar, Ravi1:
> >Hi,
> >
> >19/03/2018 13:23, Ravi Kumar:
> >> --- a/config/common_base
> >> +++ b/config/common_base
> >> @@ -529,6 +529,11 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
> >>  CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
> >>  
> >>  #
> >> +# Compile PMD for AMD CCP crypto device
> >> +#
> >> +CONFIG_RTE_LIBRTE_PMD_CCP=n
> >
> >Why is it disabled by default?
> >
> Hi Thomas,
> 
> The CCP HW crypto engines are available only on specific AMD platforms.
> That is why we let the user decide when to enable it.

Most of the devices are not always available,
but they are enabled by default.

Can we enable this one from the first patch?

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 family authentication algo
  2018-04-23  6:41             ` Kumar, Ravi1
@ 2018-04-23  8:05               ` Thomas Monjalon
  2018-04-23 10:05                 ` De Lara Guarch, Pablo
  0 siblings, 1 reply; 131+ messages in thread
From: Thomas Monjalon @ 2018-04-23  8:05 UTC (permalink / raw)
  To: Kumar, Ravi1; +Cc: pablo.de.lara.guarch, dev, hemant.agrawal

23/04/2018 08:41, Kumar, Ravi1:
> >> +CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
> >
> >Why introducing a compile-time option?
> >Can it be a run-time option of the device?
> >We must not add compile-time device option if not well justified.
[...]
> By default, all crypto operations (cipher + auth) are offloaded to the CCP engines. When the user enables CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y, the auth operations are not offloaded to the CCP but are instead performed on the CPU. We kept this feature as a compile-time option to let the user decide whether to run auth operations on the CCP or the CPU, as some auth operations perform faster on the CPU than on the CCP.

No, you do not let the user decide.
The compilation options are for the packager to decide.
The user can rely on pre-compiled packages and use only runtime options.
That's why we forbid compile-time options for such features.

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 family authentication algo
  2018-04-23  8:05               ` Thomas Monjalon
@ 2018-04-23 10:05                 ` De Lara Guarch, Pablo
  2018-04-23 10:41                   ` Kumar, Ravi1
  2018-05-03  6:01                   ` Kumar, Ravi1
  0 siblings, 2 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-04-23 10:05 UTC (permalink / raw)
  To: Thomas Monjalon, Kumar, Ravi1; +Cc: dev, hemant.agrawal

Hi Ravi,

> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> Sent: Monday, April 23, 2018 9:06 AM
> To: Kumar, Ravi1 <Ravi1.Kumar@amd.com>
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; dev@dpdk.org;
> hemant.agrawal@nxp.com
> Subject: Re: [dpdk-dev] [PATCH v5 17/19] crypto/ccp: support cpu based md5
> and sha2 family authentication algo
> 
> 23/04/2018 08:41, Kumar, Ravi1:
> > >> +CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
> > >
> > >Why introducing a compile-time option?
> > >Can it be a run-time option of the device?
> > >We must not add compile-time device option if not well justified.
> [...]
> > By default, all crypto operations (cipher + auth) are offloaded to the CCP
> > engines. When the user enables CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y, the
> > auth operations are not offloaded to the CCP but are instead performed on
> > the CPU. We kept this feature as a compile-time option to let the user
> > decide whether to run auth operations on the CCP or the CPU, as some auth
> > operations perform faster on the CPU than on the CCP.
> 
> No, you do not let the user decide.
> The compilation options are for the packager to decide.
> The user can rely on pre-compiled packages and use only runtime options.
> That's why we forbid compile-time options for such features.
> 

Could you send a patch to remove this compile-time option and pass it as an option for the vdev?
Look at how the crypto scheduler accepts parameters from "rte_vdev_init" or --vdev, in scheduler_pmd.c.
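
For illustration, a minimal sketch of that kind of devargs parsing with
rte_kvargs (the option name "ccp_auth_opt" and the helper names are
assumptions for this example, not the final interface):

#include <stdlib.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_kvargs.h>

#define CCP_CPU_AUTH_ARG "ccp_auth_opt"	/* illustrative key name */

static const char * const ccp_valid_args[] = { CCP_CPU_AUTH_ARG, NULL };

static int
parse_auth_opt(const char *key __rte_unused, const char *value, void *opaque)
{
	int *cpu_auth = opaque;

	*cpu_auth = atoi(value) ? 1 : 0;	/* ccp_auth_opt=1: auth on CPU */
	return 0;
}

static int
ccp_parse_devargs(const char *args, int *cpu_auth)
{
	struct rte_kvargs *kvlist;
	int ret;

	*cpu_auth = 0;	/* default: offload auth to the CCP engines */
	if (args == NULL)
		return 0;
	kvlist = rte_kvargs_parse(args, ccp_valid_args);
	if (kvlist == NULL)
		return -EINVAL;
	ret = rte_kvargs_process(kvlist, CCP_CPU_AUTH_ARG,
				 parse_auth_opt, cpu_auth);
	rte_kvargs_free(kvlist);
	return ret;
}

The user would then pick the behaviour at run time, e.g.
--vdev "crypto_ccp,ccp_auth_opt=1".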

Thanks!
Pablo

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 family authentication algo
  2018-04-23 10:05                 ` De Lara Guarch, Pablo
@ 2018-04-23 10:41                   ` Kumar, Ravi1
  2018-05-03  6:01                   ` Kumar, Ravi1
  1 sibling, 0 replies; 131+ messages in thread
From: Kumar, Ravi1 @ 2018-04-23 10:41 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, Thomas Monjalon; +Cc: dev, hemant.agrawal

>Hi Ravi,
>
>> -----Original Message-----
>> From: Thomas Monjalon [mailto:thomas@monjalon.net]
>> Sent: Monday, April 23, 2018 9:06 AM
>> To: Kumar, Ravi1 <Ravi1.Kumar@amd.com>
>> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; 
>> dev@dpdk.org; hemant.agrawal@nxp.com
>> Subject: Re: [dpdk-dev] [PATCH v5 17/19] crypto/ccp: support cpu based 
>> md5 and sha2 family authentication algo
>> 
>> 23/04/2018 08:41, Kumar, Ravi1:
>> > >> +CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
>> > >
>> > >Why introducing a compile-time option?
>> > >Can it be a run-time option of the device?
>> > >We must not add compile-time device option if not well justified.
>> [...]
>> > By default, all crypto operations (cipher + auth) are offloaded to the
>> > CCP engines. When the user enables CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y,
>> > the auth operations are not offloaded to the CCP but are instead
>> > performed on the CPU. We kept this feature as a compile-time option to
>> > let the user decide whether to run auth operations on the CCP or the
>> > CPU, as some auth operations perform faster on the CPU than on the CCP.
>> 
>> No, you do not let the user decide.
>> The compilation options are for the packager to decide.
>> The user can rely on pre-compiled packages and use only runtime options.
>> That's why we forbid compile-time options for such features.
>> 
>
>Could you send a patch to remove this compile-time option and pass it as an option for the vdev?
>Look at how the crypto scheduler accepts parameters from "rte_vdev_init" or --vdev, in scheduler_pmd.c.
>
>Thanks!
>Pablo
>
Hi Pablo,

Yes, we are working on it.

Regards,
Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 family authentication algo
  2018-04-23 10:05                 ` De Lara Guarch, Pablo
  2018-04-23 10:41                   ` Kumar, Ravi1
@ 2018-05-03  6:01                   ` Kumar, Ravi1
  2018-05-03  7:25                     ` De Lara Guarch, Pablo
  1 sibling, 1 reply; 131+ messages in thread
From: Kumar, Ravi1 @ 2018-05-03  6:01 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, Thomas Monjalon; +Cc: dev, hemant.agrawal

>Hi Ravi,
>
>> -----Original Message-----
>> From: Thomas Monjalon [mailto:thomas@monjalon.net]
>> Sent: Monday, April 23, 2018 9:06 AM
>> To: Kumar, Ravi1 <Ravi1.Kumar@amd.com>
>> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; 
>> dev@dpdk.org; hemant.agrawal@nxp.com
>> Subject: Re: [dpdk-dev] [PATCH v5 17/19] crypto/ccp: support cpu based 
>> md5 and sha2 family authentication algo
>> 
>> 23/04/2018 08:41, Kumar, Ravi1:
>> > >> +CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
>> > >
>> > >Why introducing a compile-time option?
>> > >Can it be a run-time option of the device?
>> > >We must not add compile-time device option if not well justified.
>> [...]
>> > By default, all crypto operations (cipher + auth) are offloaded to the
>> > CCP engines. When the user enables CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y,
>> > the auth operations are not offloaded to the CCP but are instead
>> > performed on the CPU. We kept this feature as a compile-time option to
>> > let the user decide whether to run auth operations on the CCP or the
>> > CPU, as some auth operations perform faster on the CPU than on the CCP.
>> 
>> No, you do not let the user decide.
>> The compilation options are for the packager to decide.
>> The user can rely on pre-compiled packages and use only runtime options.
>> That's why we forbid compile-time options for such features.
>> 
>
>Could you send a patch to remove this compile-time option and pass it as an option for the vdev?
>Look at how the crypto scheduler accepts parameters from "rte_vdev_init" or --vdev, in scheduler_pmd.c.
>
>Thanks!
>Pablo
>
Hi Pablo,

I have uploaded 2 patches for this issue. 

If needed, you could squash patch 1 into our very first patch (commit 0054d84f6ec8: crypto/ccp: add AMD ccp skeleton PMD).

Please let me know if you need any other information from our side. 

Regards,
Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

* Re: [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 family authentication algo
  2018-05-03  6:01                   ` Kumar, Ravi1
@ 2018-05-03  7:25                     ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 131+ messages in thread
From: De Lara Guarch, Pablo @ 2018-05-03  7:25 UTC (permalink / raw)
  To: Kumar, Ravi1, Thomas Monjalon; +Cc: dev, hemant.agrawal



> -----Original Message-----
> From: Kumar, Ravi1 [mailto:Ravi1.Kumar@amd.com]
> Sent: Thursday, May 3, 2018 7:02 AM
> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; Thomas
> Monjalon <thomas@monjalon.net>
> Cc: dev@dpdk.org; hemant.agrawal@nxp.com
> Subject: RE: [dpdk-dev] [PATCH v5 17/19] crypto/ccp: support cpu based md5
> and sha2 family authentication algo
> 
> >Hi Ravi,
> >
> >> -----Original Message-----
> >> From: Thomas Monjalon [mailto:thomas@monjalon.net]
> >> Sent: Monday, April 23, 2018 9:06 AM
> >> To: Kumar, Ravi1 <Ravi1.Kumar@amd.com>
> >> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> >> dev@dpdk.org; hemant.agrawal@nxp.com
> >> Subject: Re: [dpdk-dev] [PATCH v5 17/19] crypto/ccp: support cpu
> >> based
> >> md5 and sha2 family authentication algo
> >>
> >> 23/04/2018 08:41, Kumar, Ravi1:
> >> > >> +CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
> >> > >
> >> > >Why introducing a compile-time option?
> >> > >Can it be a run-time option of the device?
> >> > >We must not add compile-time device option if not well justified.
> >> [...]
> >> > By default, all crypto operations (cipher + auth) are offloaded to the
> >> > CCP engines. When the user enables CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y,
> >> > the auth operations are not offloaded to the CCP but are instead
> >> > performed on the CPU. We kept this feature as a compile-time option to
> >> > let the user decide whether to run auth operations on the CCP or the
> >> > CPU, as some auth operations perform faster on the CPU than on the CCP.
> >>
> >> No, you do not let the user decide.
> >> The compilation options are for the packager to decide.
> >> The user can rely on pre-compiled packages and use only runtime options.
> >> That's why we forbid compile-time options for such features.
> >>
> >
> >Could you send a patch to remove this compile-time option and pass it as an
> option for the vdev?
> >Look at how the crypto scheduler accepts parameters from "rte_vdev_init" or -
> -vdev, in scheduler_pmd.c.
> >
> >Thanks!
> >Pablo
> >
> Hi Pablo,
> 
> I have uploaded 2 patches for this issue.
> 
> If needed, you could squash the patch-1 to our very initial patch (commit
> 0054d84f6ec8: crypto/ccp: add AMD ccp skeleton PMD).
> 
> Please let me know if you need any other information from our side.

This cannot be squashed into the first patch, as it has already been merged,
so the patches will be applied as they are.

Pablo

> 
> Regards,
> Ravi

^ permalink raw reply	[flat|nested] 131+ messages in thread

end of thread, other threads:[~2018-05-03  7:25 UTC | newest]

Thread overview: 131+ messages
-- links below jump to the message on this page --
2017-11-30 13:12 [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD Ravi Kumar
2017-11-30 13:12 ` [PATCH 02/11] crypto/ccp: add " Ravi Kumar
2017-12-11 19:50   ` De Lara Guarch, Pablo
2017-12-12 11:46     ` Kumar, Ravi1
2017-11-30 13:12 ` [PATCH 03/11] crypto: add macros for AES-CMAC and SHA3 Ravi Kumar
2017-11-30 13:12 ` [PATCH 04/11] crypto/ccp: add support for AES-CMAC Ravi Kumar
2017-11-30 13:12 ` [PATCH 05/11] crypto/ccp: add support for CPU based authentication Ravi Kumar
2017-12-11 19:40   ` De Lara Guarch, Pablo
2017-11-30 13:12 ` [PATCH 06/11] crypto/ccp: add support for SHA3 family authentication Ravi Kumar
2017-11-30 13:12 ` [PATCH 07/11] doc: add document for AMD CCP crypto PMD Ravi Kumar
2017-12-11 19:36   ` De Lara Guarch, Pablo
2017-11-30 13:12 ` [PATCH 08/11] crypto/ccp: rename CCP crypto driver id Ravi Kumar
2017-12-11 14:30   ` De Lara Guarch, Pablo
2017-11-30 13:12 ` [PATCH 09/11] crypto/ccp: update queue-pair release to enable reset Ravi Kumar
2017-11-30 13:12 ` [PATCH 10/11] test/test: add test for AMD CCP crypto PMD Ravi Kumar
2017-12-11 14:27   ` De Lara Guarch, Pablo
2017-11-30 13:12 ` [PATCH 11/11] crypto/ccp: update CCP PMD code-base Ravi Kumar
2017-12-11 19:30   ` De Lara Guarch, Pablo
2017-12-11 13:39 ` [PATCH 01/11] cryptodev: add compile support for AMD CCP crypto PMD De Lara Guarch, Pablo
2017-12-11 13:41   ` Kumar, Ravi1
2018-01-05  9:39 ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 02/20] crypto/ccp: add ccp device initialization and remove Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 03/20] crypto/ccp: add basic pmd ops support for start, stop, config etc Ravi Kumar
2018-01-08 17:21     ` De Lara Guarch, Pablo
2018-01-05  9:39   ` [PATCH v2 04/20] crypto/ccp: add session related crypto pmd ops support Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 05/20] crypto/ccp: add queue pair related " Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 06/20] crypto/ccp: add crypto enqueue and dequeue burst api support Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 07/20] crypto/ccp: add support for RTE_CRYPTO_OP_SESSIONLESS Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 08/20] crypto/ccp: add stats related crypto pmd ops support Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 09/20] crypto/ccp: add ccp hwrng feature support Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 10/20] crypto/ccp: add aes cipher algo support Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 11/20] crypto/ccp: add 3des " Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 12/20] crypto/ccp: add aes-cmac auth algo aupport Ravi Kumar
2018-01-08 17:27     ` De Lara Guarch, Pablo
2018-01-05  9:39   ` [PATCH v2 13/20] crypto/ccp: add aes-gcm aead algo support Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 14/20] crypto/ccp: add sha1 auth " Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 15/20] crypto/ccp: add sha2 family " Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 16/20] crypto/ccp: add sha3 " Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 17/20] crypto/ccp: add cpu based md5 and sha2 " Ravi Kumar
2018-01-08 17:36     ` De Lara Guarch, Pablo
2018-01-05  9:39   ` [PATCH v2 18/20] doc: add document for AMD CCP crypto poll mode driver Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 19/20] test/crypto: add test " Ravi Kumar
2018-01-05  9:39   ` [PATCH v2 20/20] doc: add ccp crypto poll mode driver to release notes Ravi Kumar
2018-01-08 17:32     ` De Lara Guarch, Pablo
2018-01-07  9:02   ` [PATCH v2 01/20] crypto/ccp: add AMD ccp crypto pmd support Hemant Agrawal
2018-01-08 17:16   ` De Lara Guarch, Pablo
2018-01-10  9:42   ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 02/19] crypto/ccp: support ccp device initialization and deintialization Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 03/19] crypto/ccp: support basic pmd ops Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 04/19] crypto/ccp: support session related crypto " Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 05/19] crypto/ccp: support queue pair related " Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 06/19] crypto/ccp: support crypto enqueue and dequeue burst api Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 07/19] crypto/ccp: support for RTE_CRYPTO_OP_SESSIONLESS Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 08/19] crypto/ccp: support stats related crypto pmd ops Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 09/19] crypto/ccp: support ccp hwrng feature Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 10/19] crypto/ccp: support aes cipher algo Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 11/19] crypto/ccp: support 3des " Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 12/19] crypto/ccp: support aes-cmac auth algo Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 13/19] crypto/ccp: support aes-gcm aead algo Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 14/19] crypto/ccp: support sha1 authentication algo Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 15/19] crypto/ccp: support sha2 family " Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 16/19] crypto/ccp: support sha3 " Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 17/19] crypto/ccp: support cpu based md5 and sha2 " Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 18/19] test/crypto: add test for AMD CCP crypto poll mode driver Ravi Kumar
2018-01-10  9:42     ` [PATCH v3 19/19] doc: add document " Ravi Kumar
2018-01-10  9:53     ` [PATCH v3 01/19] crypto/ccp: add AMD ccp skeleton PMD De Lara Guarch, Pablo
2018-01-10 10:29       ` Kumar, Ravi1
2018-01-10 13:41         ` De Lara Guarch, Pablo
2018-01-11  6:36           ` Kumar, Ravi1
2018-01-16 15:29             ` De Lara Guarch, Pablo
2018-01-17  9:08               ` Kumar, Ravi1
2018-01-24  8:57                 ` De Lara Guarch, Pablo
2018-03-09  8:35     ` [PATCH v4 01/20] " Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 02/20] crypto/ccp: support ccp device initialization and deintialization Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 03/20] crypto/ccp: support basic pmd ops Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 04/20] crypto/ccp: support session related crypto " Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 05/20] crypto/ccp: support queue pair related " Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 06/20] crypto/ccp: support crypto enqueue and dequeue burst api Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 07/20] crypto/ccp: support sessionless operations Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 08/20] crypto/ccp: support stats related crypto pmd ops Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 09/20] crypto/ccp: support ccp hwrng feature Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 10/20] crypto/ccp: support aes cipher algo Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 11/20] crypto/ccp: support 3des " Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 12/20] crypto/ccp: support aes-cmac auth algo Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 13/20] crypto/ccp: support aes-gcm aead algo Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 14/20] crypto/ccp: support sha1 authentication algo Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 15/20] crypto/ccp: support sha2 family " Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 16/20] crypto/ccp: support sha3 " Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 17/20] crypto/ccp: support cpu based md5 and sha2 " Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 18/20] test/crypto: add test for AMD CCP crypto poll mode Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 19/20] doc: add document for AMD CCP crypto poll mode driver Ravi Kumar
2018-03-09  8:35       ` [PATCH v4 20/20] crypto/ccp: moved license headers to SPDX format Ravi Kumar
2018-03-12  9:18         ` De Lara Guarch, Pablo
2018-03-12 11:23           ` Kumar, Ravi1
2018-03-09 17:36       ` [PATCH v4 01/20] crypto/ccp: add AMD ccp skeleton PMD Hemant Agrawal
2018-03-09 17:46         ` Hemant Agrawal
2018-03-12 11:26           ` Kumar, Ravi1
2018-03-19 12:23       ` [PATCH v5 01/19] " Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 02/19] crypto/ccp: support ccp device initialization and deintialization Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 03/19] crypto/ccp: support basic pmd ops Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 04/19] crypto/ccp: support session related crypto " Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 05/19] crypto/ccp: support queue pair related " Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 06/19] crypto/ccp: support crypto enqueue and dequeue burst api Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 07/19] crypto/ccp: support sessionless operations Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 08/19] crypto/ccp: support stats related crypto pmd ops Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 09/19] crypto/ccp: support ccp hwrng feature Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 10/19] crypto/ccp: support aes cipher algo Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 11/19] crypto/ccp: support 3des " Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 12/19] crypto/ccp: support aes-cmac auth algo Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 13/19] crypto/ccp: support aes-gcm aead algo Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 14/19] crypto/ccp: support sha1 authentication algo Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 15/19] crypto/ccp: support sha2 family " Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 16/19] crypto/ccp: support sha3 " Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 17/19] crypto/ccp: support cpu based md5 and sha2 " Ravi Kumar
2018-04-22 20:08           ` Thomas Monjalon
2018-04-23  6:41             ` Kumar, Ravi1
2018-04-23  8:05               ` Thomas Monjalon
2018-04-23 10:05                 ` De Lara Guarch, Pablo
2018-04-23 10:41                   ` Kumar, Ravi1
2018-05-03  6:01                   ` Kumar, Ravi1
2018-05-03  7:25                     ` De Lara Guarch, Pablo
2018-03-19 12:23         ` [PATCH v5 18/19] test/crypto: add test for AMD CCP crypto poll mode Ravi Kumar
2018-03-19 12:23         ` [PATCH v5 19/19] doc: add document for AMD CCP crypto poll mode driver Ravi Kumar
2018-03-30 22:46         ` [PATCH v5 01/19] crypto/ccp: add AMD ccp skeleton PMD De Lara Guarch, Pablo
2018-04-02  5:50           ` Kumar, Ravi1
2018-04-16 14:20             ` De Lara Guarch, Pablo
2018-04-17  6:11               ` Kumar, Ravi1
2018-04-22 19:57         ` Thomas Monjalon
2018-04-23  6:37           ` Kumar, Ravi1
2018-04-23  7:55             ` De Lara Guarch, Pablo
2018-04-23  7:57             ` Thomas Monjalon
