From: asomalap@amd.com
To: dev@dpdk.org
Cc: akhil.goyal@nxp.com
Subject: [dpdk-dev] [PATCH v3] crypto/ccp: enable IOMMU for CCP
Date: Fri, 25 Dec 2020 13:33:58 +0530
Message-ID: <20201225080358.366162-1-asomalap@amd.com>
In-Reply-To: <20200128083819.32834-1-asomalap@amd.com>

From: Amaranath Somalapuram <Amaranath.Somalapuram@amd.com>

CCP uses the vdev framework, and the vdev framework does not support IOMMU.
Add custom IOMMU support to the AMD CCP driver.

Signed-off-by: Amaranath Somalapuram <Amaranath.Somalapuram@amd.com>
---
 drivers/crypto/ccp/ccp_crypto.c  | 114 ++++++++++++++++++++++++-------
 drivers/crypto/ccp/ccp_dev.c     |  54 +++------------
 drivers/crypto/ccp/ccp_pci.c     |   1 +
 drivers/crypto/ccp/rte_ccp_pmd.c |   3 +
 4 files changed, 104 insertions(+), 68 deletions(-)
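
Note: all of the iommu_mode checks in this patch follow one pattern.
A minimal sketch of the idea (the helper name ccp_mem_virt2dma is
illustrative only and not part of the patch):

	#include <rte_memory.h>

	extern int iommu_mode;	/* 0/1: UIO variants, 2: vfio_pci */

	/*
	 * Translate a virtual address into the DMA address the CCP
	 * engine should use: an IOVA when vfio_pci (IOMMU) is active,
	 * a physical address otherwise.
	 */
	static inline phys_addr_t
	ccp_mem_virt2dma(const void *vaddr)
	{
		if (iommu_mode == 2)
			return (phys_addr_t)rte_mem_virt2iova(vaddr);
		return (phys_addr_t)rte_mem_virt2phy(vaddr);
	}

Factoring the checks this way would shrink the diff; the open-coded
form below keeps every call site explicit instead.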

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index db3fb6eff..f37d35f18 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -31,8 +31,15 @@
 #include <openssl/err.h>
 #include <openssl/hmac.h>
 
+extern int iommu_mode;
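+/*
+ * Staging buffer for the SHA init contexts below; the static init
+ * arrays live outside DPDK-managed memory, so they cannot be handed
+ * to the engine directly when an IOMMU is in use.
+ */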
+void *sha_ctx;
 /* SHA initial context values */
-static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	SHA1_H4, SHA1_H3,
 	SHA1_H2, SHA1_H1,
 	SHA1_H0, 0x0U,
@@ -744,8 +746,17 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
 	}
-	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
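+	/*
+	 * iommu_mode == 2 means vfio_pci is bound and an IOMMU is active:
+	 * the CCP engine must be given IOVAs, not physical addresses.
+	 */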
+	if (iommu_mode == 2) {
+		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+	} else {
+		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	}
 	return 0;
 }
 
@@ -784,6 +791,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha1_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA_COMMON_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		if (sess->auth_opt) {
@@ -822,6 +830,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha224_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
 		if (sess->auth_opt) {
@@ -884,6 +893,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha256_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
 		if (sess->auth_opt) {
@@ -946,6 +956,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha384_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
 		if (sess->auth_opt) {
@@ -1010,6 +1021,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha512_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
 		if (sess->auth_opt) {
@@ -1159,8 +1171,13 @@ ccp_configure_session_aead(struct ccp_session *sess,
 		CCP_LOG_ERR("Unsupported aead algo");
 		return -ENOTSUP;
 	}
-	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	if (iommu_mode == 2) {
+		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+	} else {
+		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	}
 	return 0;
 }
 
@@ -1575,11 +1592,16 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 					      op->sym->auth.data.offset);
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	}
 	dest_addr_t = dest_addr;
 
 	/** Load PHash1 to LSB*/
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1659,7 +1681,10 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 
 	/** Load PHash2 to LSB*/
 	addr += session->auth.ctx_len;
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	if (iommu_mode == 2)
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+	else
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1745,15 +1770,24 @@ ccp_perform_sha(struct rte_crypto_op *op,
 
 	src_addr = rte_pktmbuf_iova_offset(op->sym->m_src,
 					      op->sym->auth.data.offset);
-
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
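+	/*
+	 * The SHA init vector was staged into sha_ctx at session setup;
+	 * sha_ctx is rte_malloc memory, so its VA doubles as the DMA
+	 * address here (assumes the IOVA-as-VA mode used with vfio_pci).
+	 */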
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+		pst.src_addr = (phys_addr_t)sha_ctx;
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+						     session->auth.ctx);
+	}
 
 	/** Passthru sha context*/
 
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
-						     session->auth.ctx);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1840,10 +1869,16 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2iova(
+					session->auth.pre_compute);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2phy(
+					session->auth.pre_compute);
+	}
 	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
-	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
-						   *)session->auth.pre_compute);
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 	memset(desc, 0, Q_DESC_SIZE);
 
@@ -1964,7 +1999,7 @@ ccp_perform_sha3(struct rte_crypto_op *op,
 	struct ccp_session *session;
 	union ccp_function function;
 	struct ccp_desc *desc;
-	uint8_t *ctx_addr, *append_ptr;
+	uint8_t *ctx_addr = NULL, *append_ptr = NULL;
 	uint32_t tail;
 	phys_addr_t src_addr, dest_addr, ctx_paddr;
 
@@ -1980,9 +2015,14 @@
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
 	ctx_addr = session->auth.sha3_ctx;
-	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+	if (iommu_mode == 2) {
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
+	} else {
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+	}
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 	memset(desc, 0, Q_DESC_SIZE);
@@ -2056,7 +2097,13 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 
 		ctx_addr = session->auth.pre_compute;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *)ctx_addr);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *)ctx_addr);
+
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2094,7 +2141,12 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 	} else {
 		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *)ctx_addr);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *)ctx_addr);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2288,8 +2340,12 @@ ccp_perform_3des(struct rte_crypto_op *op,
 
 		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
 			   iv, session->iv.length);
-
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+							(void *) lsb_buf);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+							(void *) lsb_buf);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2312,7 +2368,10 @@ ccp_perform_3des(struct rte_crypto_op *op,
 	else
 		dest_addr = src_addr;
 
-	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+	if (iommu_mode == 2)
+		key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
+	else
+		key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 
@@ -2707,8 +2766,13 @@ process_ops_to_enqueue(struct ccp_qp *qp,
 	b_info->lsb_buf_idx = 0;
 	b_info->desccnt = 0;
 	b_info->cmd_q = cmd_q;
-	b_info->lsb_buf_phys =
-		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	if (iommu_mode == 2)
+		b_info->lsb_buf_phys =
+			(phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
+	else
+		b_info->lsb_buf_phys =
+			(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+
 	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
 
 	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 664ddc174..ee6882b8a 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -23,6 +23,7 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
+int iommu_mode;
 struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
 static int ccp_dev_id;
 
@@ -512,7 +513,9 @@ ccp_add_device(struct ccp_device *dev, int type)
 
 		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
 	}
-	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
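+	/* Use requester ID 0 for all queues; presumably required so that
+	 * DMA requests match the IOMMU domain set up by vfio-pci. */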
+	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x0);
 
 	/* Copy the private LSB mask to the public registers */
 	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
@@ -657,9 +658,7 @@ ccp_probe_device(const char *dirname, uint16_t domain,
 	struct rte_pci_device *pci;
 	char filename[PATH_MAX];
 	unsigned long tmp;
-	int uio_fd = -1, i, uio_num;
-	char uio_devname[PATH_MAX];
-	void *map_addr;
+	int uio_fd = -1;
 
 	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
 			      RTE_CACHE_LINE_SIZE);
@@ -710,46 +709,19 @@
 	snprintf(filename, sizeof(filename), "%s/resource", dirname);
 	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
 		goto fail;
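+	/*
+	 * iommu_mode is the index of the loaded kernel module:
+	 * 0 = igb_uio, 1 = uio_pci_generic, 2 = vfio_pci.
+	 */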
+	if (iommu_mode == 2)
+		pci->kdrv = RTE_PCI_KDRV_VFIO;
+	else if (iommu_mode == 0)
+		pci->kdrv = RTE_PCI_KDRV_IGB_UIO;
+	else if (iommu_mode == 1)
+		pci->kdrv = RTE_PCI_KDRV_UIO_GENERIC;
 
-	uio_num = ccp_find_uio_devname(dirname);
-	if (uio_num < 0) {
-		/*
-		 * It may take time for uio device to appear,
-		 * wait  here and try again
-		 */
-		usleep(100000);
-		uio_num = ccp_find_uio_devname(dirname);
-		if (uio_num < 0)
-			goto fail;
-	}
-	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
-
-	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
-	if (uio_fd < 0)
-		goto fail;
-	if (flock(uio_fd, LOCK_EX | LOCK_NB))
-		goto fail;
-
-	/* Map the PCI memory resource of device */
-	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
-
-		char devname[PATH_MAX];
-		int res_fd;
-
-		if (pci->mem_resource[i].phys_addr == 0)
-			continue;
-		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
-		res_fd = open(devname, O_RDWR);
-		if (res_fd < 0)
-			goto fail;
-		map_addr = mmap(NULL, pci->mem_resource[i].len,
-				PROT_READ | PROT_WRITE,
-				MAP_SHARED, res_fd, 0);
-		if (map_addr == MAP_FAILED)
-			goto fail;
-
-		pci->mem_resource[i].addr = map_addr;
-	}
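+	/* Let the EAL map the BARs for the kernel driver chosen above. */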
+	rte_pci_map_device(pci);
 
 	/* device is valid, add in list */
 	if (ccp_add_device(ccp_dev, ccp_type)) {
@@ -784,6 +751,7 @@ ccp_probe_devices(const struct rte_pci_id *ccp_id)
 	if (module_idx < 0)
 		return -1;
 
+	iommu_mode = module_idx;
 	TAILQ_INIT(&ccp_list);
 	dir = opendir(SYSFS_PCI_DEVICES);
 	if (dir == NULL)
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
index 1702a09c4..38029a908 100644
--- a/drivers/crypto/ccp/ccp_pci.c
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -15,6 +15,7 @@
 static const char * const uio_module_names[] = {
 	"igb_uio",
 	"uio_pci_generic",
+	"vfio_pci"
 };
 
 int
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 000b2f4fe..ba379a19f 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -22,6 +22,7 @@
 static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
 uint8_t cryptodev_cnt;
+extern void *sha_ctx;
 
 struct ccp_pmd_init_params {
 	struct rte_cryptodev_pmd_init_params def_p;
@@ -305,6 +306,7 @@ cryptodev_ccp_remove(struct rte_vdev_device *dev)
 
 	ccp_pmd_init_done = 0;
 	name = rte_vdev_device_name(dev);
+	rte_free(sha_ctx);
 	if (name == NULL)
 		return -EINVAL;
 
@@ -388,6 +390,9 @@
 	};
 	const char *input_args;
 
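+	/* 64-byte aligned rte_malloc buffer used to stage SHA init
+	 * contexts for DMA; see ccp_configure_session_auth(). */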
+	sha_ctx = (void *)rte_malloc(NULL, SHA512_DIGEST_SIZE, 64);
 	if (ccp_pmd_init_done) {
 		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
 		return -EFAULT;
-- 
2.25.1

