From: Marcin Smoczynski <marcinx.smoczynski@intel.com>
To: akhil.goyal@nxp.com, konstantin.ananyev@intel.com,
	roy.fan.zhang@intel.com, declan.doherty@intel.com,
	radu.nicolau@intel.com, pablo.de.lara.guarch@intel.com
Cc: dev@dpdk.org, Marcin Smoczynski <marcinx.smoczynski@intel.com>
Subject: [dpdk-dev] [PATCH v5 5/8] ipsec: introduce support for cpu crypto mode
Date: Tue, 28 Jan 2020 15:22:17 +0100
Message-ID: <20200128142220.16644-6-marcinx.smoczynski@intel.com>
In-Reply-To: <20200128142220.16644-1-marcinx.smoczynski@intel.com>

Update the library to handle the CPU crypto security mode, which
utilizes cryptodev's synchronous, CPU-accelerated crypto operations.
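
Below is a minimal usage sketch of the new synchronous path. It is an
illustration only, not part of this patch: it assumes an SA bound to a
session of type RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO, with crypto.ses and
crypto.dev_id filled in and rte_ipsec_session_prepare() already called;
error handling is omitted.

    /* illustrative only: burst processing via the cpu-crypto path */
    static uint16_t
    process_burst_sync(struct rte_ipsec_session *ss,
            struct rte_mbuf *mb[], uint16_t num)
    {
            uint16_t k, n;

            /*
             * prepare packets and perform crypto/auth synchronously
             * on the calling lcore (no cryptodev enqueue/dequeue)
             */
            k = rte_ipsec_pkt_cpu_prepare(ss, mb, num);

            /* finalize packets: check status flags, update SQN, etc. */
            n = rte_ipsec_pkt_process(ss, mb, k);

            /* mb[0..n-1] now hold the successfully processed packets */
            return n;
    }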

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Signed-off-by: Marcin Smoczynski <marcinx.smoczynski@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 lib/librte_ipsec/esp_inb.c   | 156 ++++++++++++++++++++++++++++++-----
 lib/librte_ipsec/esp_outb.c  | 136 +++++++++++++++++++++++++++---
 lib/librte_ipsec/misc.h      | 120 ++++++++++++++++++++++++++-
 lib/librte_ipsec/rte_ipsec.h |  20 ++++-
 lib/librte_ipsec/sa.c        | 114 ++++++++++++++++++++-----
 lib/librte_ipsec/sa.h        |  19 ++++-
 lib/librte_ipsec/ses.c       |   5 +-
 7 files changed, 513 insertions(+), 57 deletions(-)

diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c
index 5c653dd39..7b8ab81f6 100644
--- a/lib/librte_ipsec/esp_inb.c
+++ b/lib/librte_ipsec/esp_inb.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
  */
 
 #include <rte_ipsec.h>
@@ -105,6 +105,39 @@ inb_cop_prepare(struct rte_crypto_op *cop,
 	}
 }
 
+static inline uint32_t
+inb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+	uint32_t *pofs, uint32_t plen, void *iv)
+{
+	struct aead_gcm_iv *gcm;
+	struct aesctr_cnt_blk *ctr;
+	uint64_t *ivp;
+	uint32_t clen;
+
+	ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+		*pofs + sizeof(struct rte_esp_hdr));
+	clen = 0;
+
+	switch (sa->algo_type) {
+	case ALGO_TYPE_AES_GCM:
+		gcm = (struct aead_gcm_iv *)iv;
+		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+		break;
+	case ALGO_TYPE_AES_CBC:
+	case ALGO_TYPE_3DES_CBC:
+		copy_iv(iv, ivp, sa->iv_len);
+		break;
+	case ALGO_TYPE_AES_CTR:
+		ctr = (struct aesctr_cnt_blk *)iv;
+		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+		break;
+	}
+
+	*pofs += sa->ctp.auth.offset;
+	clen = plen - sa->ctp.auth.length;
+	return clen;
+}
+
 /*
  * Helper function for prepare() to deal with situation when
  * ICV is spread by two segments. Tries to move ICV completely into the
@@ -157,17 +190,12 @@ inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	}
 }
 
-/*
- * setup/update packet data and metadata for ESP inbound tunnel case.
- */
-static inline int32_t
-inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
-	struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
+static inline int
+inb_get_sqn(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
+	struct rte_mbuf *mb, uint32_t hlen, rte_be64_t *sqc)
 {
 	int32_t rc;
 	uint64_t sqn;
-	uint32_t clen, icv_len, icv_ofs, plen;
-	struct rte_mbuf *ml;
 	struct rte_esp_hdr *esph;
 
 	esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
@@ -179,12 +207,21 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
 	sqn = rte_be_to_cpu_32(esph->seq);
 	if (IS_ESN(sa))
 		sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz);
+	*sqc = rte_cpu_to_be_64(sqn);
 
+	/* check IPsec window */
 	rc = esn_inb_check_sqn(rsn, sa, sqn);
-	if (rc != 0)
-		return rc;
 
-	sqn = rte_cpu_to_be_64(sqn);
+	return rc;
+}
+
+/* prepare packet for upcoming processing */
+static inline int32_t
+inb_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+	uint32_t hlen, union sym_op_data *icv)
+{
+	uint32_t clen, icv_len, icv_ofs, plen;
+	struct rte_mbuf *ml;
 
 	/* start packet manipulation */
 	plen = mb->pkt_len;
@@ -217,7 +254,8 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
 
 	icv_ofs += sa->sqh_len;
 
-	/* we have to allocate space for AAD somewhere,
+	/*
+	 * we have to allocate space for AAD somewhere,
 	 * right now - just use free trailing space at the last segment.
 	 * Would probably be more convenient to reserve space for AAD
 	 * inside rte_crypto_op itself
@@ -238,10 +276,28 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
 	mb->pkt_len += sa->sqh_len;
 	ml->data_len += sa->sqh_len;
 
-	inb_pkt_xprepare(sa, sqn, icv);
 	return plen;
 }
 
+static inline int32_t
+inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
+	struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
+{
+	int rc;
+	rte_be64_t sqn;
+
+	rc = inb_get_sqn(sa, rsn, mb, hlen, &sqn);
+	if (rc != 0)
+		return rc;
+
+	rc = inb_prepare(sa, mb, hlen, icv);
+	if (rc < 0)
+		return rc;
+
+	inb_pkt_xprepare(sa, sqn, icv);
+	return rc;
+}
+
 /*
  * setup/update packets and crypto ops for ESP inbound case.
  */
@@ -270,17 +326,17 @@ esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 			lksd_none_cop_prepare(cop[k], cs, mb[i]);
 			inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
 			k++;
-		} else
+		} else {
 			dr[i - k] = i;
+			rte_errno = -rc;
+		}
 	}
 
 	rsn_release(sa, rsn);
 
 	/* copy not prepared mbufs beyond good ones */
-	if (k != num && k != 0) {
+	if (k != num && k != 0)
 		move_bad_mbufs(mb, dr, num, num - k);
-		rte_errno = EBADMSG;
-	}
 
 	return k;
 }
@@ -512,7 +568,6 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
 	return k;
 }
 
-
 /*
  * *process* function for tunnel packets
  */
@@ -612,7 +667,7 @@ esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
 	if (k != num && k != 0)
 		move_bad_mbufs(mb, dr, num, num - k);
 
-	/* update SQN and replay winow */
+	/* update SQN and replay window */
 	n = esp_inb_rsn_update(sa, sqn, dr, k);
 
 	/* handle mbufs with wrong SQN */
@@ -625,6 +680,67 @@ esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
 	return n;
 }
 
+/*
+ * Prepare (plus actual crypto/auth) routine for inbound CPU-CRYPTO
+ * (synchronous mode).
+ */
+uint16_t
+cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num)
+{
+	int32_t rc;
+	uint32_t i, k;
+	struct rte_ipsec_sa *sa;
+	struct replay_sqn *rsn;
+	union sym_op_data icv;
+	void *iv[num];
+	void *aad[num];
+	void *dgst[num];
+	uint32_t dr[num];
+	uint32_t l4ofs[num];
+	uint32_t clen[num];
+	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
+
+	sa = ss->sa;
+
+	/* grab rsn lock */
+	rsn = rsn_acquire(sa);
+
+	/* do preparation for all packets */
+	for (i = 0, k = 0; i != num; i++) {
+
+		/* calculate ESP header offset */
+		l4ofs[k] = mb[i]->l2_len + mb[i]->l3_len;
+
+		/* prepare ESP packet for processing */
+		rc = inb_pkt_prepare(sa, rsn, mb[i], l4ofs[k], &icv);
+		if (rc >= 0) {
+			/* get encrypted data offset and length */
+			clen[k] = inb_cpu_crypto_prepare(sa, mb[i],
+				l4ofs + k, rc, ivbuf[k]);
+
+			/* fill iv, digest and aad */
+			iv[k] = ivbuf[k];
+			aad[k] = icv.va + sa->icv_len;
+			dgst[k++] = icv.va;
+		} else {
+			dr[i - k] = i;
+			rte_errno = -rc;
+		}
+	}
+
+	/* release rsn lock */
+	rsn_release(sa, rsn);
+
+	/* copy not prepared mbufs beyond good ones */
+	if (k != num && k != 0)
+		move_bad_mbufs(mb, dr, num, num - k);
+
+	/* convert mbufs to iovecs and do actual crypto/auth processing */
+	cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst, l4ofs, clen, k);
+	return k;
+}
+
 /*
  * process group of ESP inbound tunnel packets.
  */
diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c
index e983b25a3..b6d9cbe98 100644
--- a/lib/librte_ipsec/esp_outb.c
+++ b/lib/librte_ipsec/esp_outb.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
  */
 
 #include <rte_ipsec.h>
@@ -15,6 +15,9 @@
 #include "misc.h"
 #include "pad.h"
 
+typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc,
+	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
+	union sym_op_data *icv, uint8_t sqh_len);
 
 /*
  * helper function to fill crypto_sym op for cipher+auth algorithms.
@@ -177,6 +180,7 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	espt->pad_len = pdlen;
 	espt->next_proto = sa->proto;
 
+	/* set icv va/pa value(s) */
 	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
 	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
 
@@ -270,8 +274,7 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 static inline int32_t
 outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
-	uint32_t l2len, uint32_t l3len, union sym_op_data *icv,
-	uint8_t sqh_len)
+	union sym_op_data *icv, uint8_t sqh_len)
 {
 	uint8_t np;
 	uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen;
@@ -280,6 +283,10 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	struct rte_esp_tail *espt;
 	char *ph, *pt;
 	uint64_t *iv;
+	uint32_t l2len, l3len;
+
+	l2len = mb->l2_len;
+	l3len = mb->l3_len;
 
 	uhlen = l2len + l3len;
 	plen = mb->pkt_len - uhlen;
@@ -340,6 +347,7 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	espt->pad_len = pdlen;
 	espt->next_proto = np;
 
+	/* set icv va/pa value(s) */
 	icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
 	icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
 
@@ -381,8 +389,8 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 		gen_iv(iv, sqc);
 
 		/* try to update the packet itself */
-		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3, &icv,
-					  sa->sqh_len);
+		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv,
+				  sa->sqh_len);
 		/* success, setup crypto op */
 		if (rc >= 0) {
 			outb_pkt_xprepare(sa, sqc, &icv);
@@ -403,6 +411,116 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	return k;
 }
 
+
+static inline uint32_t
+outb_cpu_crypto_prepare(const struct rte_ipsec_sa *sa, uint32_t *pofs,
+	uint32_t plen, void *iv)
+{
+	uint64_t *ivp = iv;
+	struct aead_gcm_iv *gcm;
+	struct aesctr_cnt_blk *ctr;
+	uint32_t clen;
+
+	switch (sa->algo_type) {
+	case ALGO_TYPE_AES_GCM:
+		gcm = iv;
+		aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+		break;
+	case ALGO_TYPE_AES_CTR:
+		ctr = iv;
+		aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+		break;
+	}
+
+	*pofs += sa->ctp.auth.offset;
+	clen = plen + sa->ctp.auth.length;
+	return clen;
+}
+
+static uint16_t
+cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num,
+		esp_outb_prepare_t prepare, uint32_t cofs_mask)
+{
+	int32_t rc;
+	uint64_t sqn;
+	rte_be64_t sqc;
+	struct rte_ipsec_sa *sa;
+	uint32_t i, k, n;
+	uint32_t l2, l3;
+	union sym_op_data icv;
+	void *iv[num];
+	void *aad[num];
+	void *dgst[num];
+	uint32_t dr[num];
+	uint32_t l4ofs[num];
+	uint32_t clen[num];
+	uint64_t ivbuf[num][IPSEC_MAX_IV_QWORD];
+
+	sa = ss->sa;
+
+	n = num;
+	sqn = esn_outb_update_sqn(sa, &n);
+	if (n != num)
+		rte_errno = EOVERFLOW;
+
+	for (i = 0, k = 0; i != n; i++) {
+
+		l2 = mb[i]->l2_len;
+		l3 = mb[i]->l3_len;
+
+		/* calculate ESP header offset */
+		l4ofs[k] = (l2 + l3) & cofs_mask;
+
+		sqc = rte_cpu_to_be_64(sqn + i);
+		gen_iv(ivbuf[k], sqc);
+
+		/* try to update the packet itself */
+		rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len);
+
+		/* success, proceed with preparations */
+		if (rc >= 0) {
+
+			outb_pkt_xprepare(sa, sqc, &icv);
+
+			/* get encrypted data offset and length */
+			clen[k] = outb_cpu_crypto_prepare(sa, l4ofs + k, rc,
+				ivbuf[k]);
+
+			/* fill iv, digest and aad */
+			iv[k] = ivbuf[k];
+			aad[k] = icv.va + sa->icv_len;
+			dgst[k++] = icv.va;
+		} else {
+			dr[i - k] = i;
+			rte_errno = -rc;
+		}
+	}
+
+	/* copy not prepared mbufs beyond good ones */
+	if (k != n && k != 0)
+		move_bad_mbufs(mb, dr, n, n - k);
+
+	/* convert mbufs to iovecs and do actual crypto/auth processing */
+	cpu_crypto_bulk(ss, sa->cofs, mb, iv, aad, dgst, l4ofs, clen, k);
+	return k;
+}
+
+uint16_t
+cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
+{
+	return cpu_outb_pkt_prepare(ss, mb, num, outb_tun_pkt_prepare, 0);
+}
+
+uint16_t
+cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
+{
+	return cpu_outb_pkt_prepare(ss, mb, num, outb_trs_pkt_prepare,
+		UINT32_MAX);
+}
+
 /*
  * process outbound packets for SA with ESN support,
  * for algorithms that require SQN.hibits to be implictly included
@@ -526,7 +644,7 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num)
 {
 	int32_t rc;
-	uint32_t i, k, n, l2, l3;
+	uint32_t i, k, n;
 	uint64_t sqn;
 	rte_be64_t sqc;
 	struct rte_ipsec_sa *sa;
@@ -544,15 +662,11 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
 	k = 0;
 	for (i = 0; i != n; i++) {
 
-		l2 = mb[i]->l2_len;
-		l3 = mb[i]->l3_len;
-
 		sqc = rte_cpu_to_be_64(sqn + i);
 		gen_iv(iv, sqc);
 
 		/* try to update the packet itself */
-		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
-				l2, l3, &icv, 0);
+		rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
 
 		k += (rc >= 0);
 
diff --git a/lib/librte_ipsec/misc.h b/lib/librte_ipsec/misc.h
index fe4641bfc..fc4b3dc69 100644
--- a/lib/librte_ipsec/misc.h
+++ b/lib/librte_ipsec/misc.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
  */
 
 #ifndef _MISC_H_
@@ -105,4 +105,122 @@ mbuf_cut_seg_ofs(struct rte_mbuf *mb, struct rte_mbuf *ms, uint32_t ofs,
 	mb->pkt_len -= len;
 }
 
+static inline int
+mbuf_to_cryptovec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t data_len,
+	struct rte_crypto_vec vec[], uint32_t num)
+{
+	uint32_t i;
+	struct rte_mbuf *nseg;
+	uint32_t left;
+	uint32_t seglen;
+
+	/* assuming that requested data starts in the first segment */
+	RTE_ASSERT(mb->data_len > ofs);
+
+	if (mb->nb_segs > num)
+		return -mb->nb_segs;
+
+	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
+
+	/* whole data lies in the first segment */
+	seglen = mb->data_len - ofs;
+	if (data_len <= seglen) {
+		vec[0].len = data_len;
+		return 1;
+	}
+
+	/* data spread across segments */
+	vec[0].len = seglen;
+	left = data_len - seglen;
+	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {
+		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
+
+		seglen = nseg->data_len;
+		if (left <= seglen) {
+			/* whole requested data is completed */
+			vec[i].len = left;
+			left = 0;
+			break;
+		}
+
+		/* use whole segment */
+		vec[i].len = seglen;
+		left -= seglen;
+	}
+
+	RTE_ASSERT(left == 0);
+	return i + 1;
+}
+
+/*
+ * process packets using sync crypto engine
+ */
+static inline void
+cpu_crypto_bulk(const struct rte_ipsec_session *ss,
+	union rte_crypto_sym_ofs ofs, struct rte_mbuf *mb[],
+	void *iv[], void *aad[], void *dgst[], uint32_t l4ofs[],
+	uint32_t clen[], uint32_t num)
+{
+	uint32_t i, j, n;
+	int32_t vcnt, vofs;
+	int32_t st[num];
+	struct rte_crypto_sgl vecpkt[num];
+	struct rte_crypto_vec vec[UINT8_MAX];
+	struct rte_crypto_sym_vec symvec;
+
+	const uint32_t vnum = RTE_DIM(vec);
+
+	j = 0, n = 0;
+	vofs = 0;
+	for (i = 0; i != num; i++) {
+
+		vcnt = mbuf_to_cryptovec(mb[i], l4ofs[i], clen[i], &vec[vofs],
+			vnum - vofs);
+
+		/* not enough space in vec[] to hold all segments */
+		if (vcnt < 0) {
+			/* fill the request structure */
+			symvec.sgl = &vecpkt[j];
+			symvec.iv = &iv[j];
+			symvec.aad = &aad[j];
+			symvec.digest = &dgst[j];
+			symvec.status = &st[j];
+			symvec.num = i - j;
+
+			/* flush vec array and try again */
+			n += rte_cryptodev_sym_cpu_crypto_process(
+				ss->crypto.dev_id, ss->crypto.ses, ofs,
+				&symvec);
+			vofs = 0;
+			vcnt = mbuf_to_cryptovec(mb[i], l4ofs[i], clen[i], vec,
+				vnum);
+			RTE_ASSERT(vcnt > 0);
+			j = i;
+		}
+
+		vecpkt[i].vec = &vec[vofs];
+		vecpkt[i].num = vcnt;
+		vofs += vcnt;
+	}
+
+	/* fill the request structure */
+	symvec.sgl = &vecpkt[j];
+	symvec.iv = &iv[j];
+	symvec.aad = &aad[j];
+	symvec.digest = &dgst[j];
+	symvec.status = &st[j];
+	symvec.num = i - j;
+
+	n += rte_cryptodev_sym_cpu_crypto_process(ss->crypto.dev_id,
+		ss->crypto.ses, ofs, &symvec);
+
+	j = num - n;
+	for (i = 0; j != 0 && i != num; i++) {
+		if (st[i] != 0) {
+			mb[i]->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+			j--;
+		}
+	}
+}
+
 #endif /* _MISC_H_ */
diff --git a/lib/librte_ipsec/rte_ipsec.h b/lib/librte_ipsec/rte_ipsec.h
index f3b1f936b..6666cf761 100644
--- a/lib/librte_ipsec/rte_ipsec.h
+++ b/lib/librte_ipsec/rte_ipsec.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
  */
 
 #ifndef _RTE_IPSEC_H_
@@ -33,10 +33,15 @@ struct rte_ipsec_session;
  *   (see rte_ipsec_pkt_process for more details).
  */
 struct rte_ipsec_sa_pkt_func {
-	uint16_t (*prepare)(const struct rte_ipsec_session *ss,
+	union {
+		uint16_t (*async)(const struct rte_ipsec_session *ss,
 				struct rte_mbuf *mb[],
 				struct rte_crypto_op *cop[],
 				uint16_t num);
+		uint16_t (*sync)(const struct rte_ipsec_session *ss,
+				struct rte_mbuf *mb[],
+				uint16_t num);
+	} prepare;
 	uint16_t (*process)(const struct rte_ipsec_session *ss,
 				struct rte_mbuf *mb[],
 				uint16_t num);
@@ -62,6 +67,7 @@ struct rte_ipsec_session {
 	union {
 		struct {
 			struct rte_cryptodev_sym_session *ses;
+			uint8_t dev_id;
 		} crypto;
 		struct {
 			struct rte_security_session *ses;
@@ -114,7 +120,15 @@ static inline uint16_t
 rte_ipsec_pkt_crypto_prepare(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
 {
-	return ss->pkt_func.prepare(ss, mb, cop, num);
+	return ss->pkt_func.prepare.async(ss, mb, cop, num);
+}
+
+__rte_experimental
+static inline uint16_t
+rte_ipsec_pkt_cpu_prepare(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num)
+{
+	return ss->pkt_func.prepare.sync(ss, mb, num);
 }
 
 /**
diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
index 6f1d92c3c..ada195cf8 100644
--- a/lib/librte_ipsec/sa.c
+++ b/lib/librte_ipsec/sa.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
  */
 
 #include <rte_ipsec.h>
@@ -243,10 +243,26 @@ static void
 esp_inb_init(struct rte_ipsec_sa *sa)
 {
 	/* these params may differ with new algorithms support */
-	sa->ctp.auth.offset = 0;
-	sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
 	sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
 	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
+
+	/*
+	 * for AEAD and NULL algorithms we can assume that
+	 * auth and cipher offsets would be equal.
+	 */
+	switch (sa->algo_type) {
+	case ALGO_TYPE_AES_GCM:
+	case ALGO_TYPE_NULL:
+		sa->ctp.auth.raw = sa->ctp.cipher.raw;
+		break;
+	default:
+		sa->ctp.auth.offset = 0;
+		sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
+		sa->cofs.ofs.cipher.tail = sa->sqh_len;
+		break;
+	}
+
+	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
 }
 
 /*
@@ -269,13 +285,13 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
 
 	sa->sqn.outb.raw = 1;
 
-	/* these params may differ with new algorithms support */
-	sa->ctp.auth.offset = hlen;
-	sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
-		sa->iv_len + sa->sqh_len;
-
 	algo_type = sa->algo_type;
 
+	/*
+	 * Setup auth and cipher length and offset.
+	 * these params may differ with new algorithms support
+	 */
+
 	switch (algo_type) {
 	case ALGO_TYPE_AES_GCM:
 	case ALGO_TYPE_AES_CTR:
@@ -286,11 +302,30 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
 		break;
 	case ALGO_TYPE_AES_CBC:
 	case ALGO_TYPE_3DES_CBC:
-		sa->ctp.cipher.offset = sa->hdr_len +
-			sizeof(struct rte_esp_hdr);
+		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);
 		sa->ctp.cipher.length = sa->iv_len;
 		break;
 	}
+
+	/*
+	 * for AEAD and NULL algorithms we can assume that
+	 * auth and cipher offsets would be equal.
+	 */
+	switch (algo_type) {
+	case ALGO_TYPE_AES_GCM:
+	case ALGO_TYPE_NULL:
+		sa->ctp.auth.raw = sa->ctp.cipher.raw;
+		break;
+	default:
+		sa->ctp.auth.offset = hlen;
+		sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
+			sa->iv_len + sa->sqh_len;
+		break;
+	}
+
+	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
+	sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + sa->ctp.auth.length) -
+			(sa->ctp.cipher.offset + sa->ctp.cipher.length);
 }
 
 /*
@@ -544,9 +579,9 @@ lksd_proto_prepare(const struct rte_ipsec_session *ss,
  * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
  * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
  */
-static uint16_t
-pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
-	uint16_t num)
+uint16_t
+pkt_flag_process(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num)
 {
 	uint32_t i, k;
 	uint32_t dr[num];
@@ -588,21 +623,59 @@ lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
 	switch (sa->type & msk) {
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
-		pf->prepare = esp_inb_pkt_prepare;
+		pf->prepare.async = esp_inb_pkt_prepare;
 		pf->process = esp_inb_tun_pkt_process;
 		break;
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
-		pf->prepare = esp_inb_pkt_prepare;
+		pf->prepare.async = esp_inb_pkt_prepare;
 		pf->process = esp_inb_trs_pkt_process;
 		break;
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
-		pf->prepare = esp_outb_tun_prepare;
+		pf->prepare.async = esp_outb_tun_prepare;
 		pf->process = (sa->sqh_len != 0) ?
 			esp_outb_sqh_process : pkt_flag_process;
 		break;
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
-		pf->prepare = esp_outb_trs_prepare;
+		pf->prepare.async = esp_outb_trs_prepare;
+		pf->process = (sa->sqh_len != 0) ?
+			esp_outb_sqh_process : pkt_flag_process;
+		break;
+	default:
+		rc = -ENOTSUP;
+	}
+
+	return rc;
+}
+
+static int
+cpu_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
+		struct rte_ipsec_sa_pkt_func *pf)
+{
+	int32_t rc;
+
+	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+			RTE_IPSEC_SATP_MODE_MASK;
+
+	rc = 0;
+	switch (sa->type & msk) {
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+		pf->prepare.sync = cpu_inb_pkt_prepare;
+		pf->process = esp_inb_tun_pkt_process;
+		break;
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+		pf->prepare.sync = cpu_inb_pkt_prepare;
+		pf->process = esp_inb_trs_pkt_process;
+		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+		pf->prepare.sync = cpu_outb_tun_pkt_prepare;
+		pf->process = (sa->sqh_len != 0) ?
+			esp_outb_sqh_process : pkt_flag_process;
+		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+		pf->prepare.sync = cpu_outb_trs_pkt_prepare;
 		pf->process = (sa->sqh_len != 0) ?
 			esp_outb_sqh_process : pkt_flag_process;
 		break;
@@ -660,7 +733,7 @@ ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
 	int32_t rc;
 
 	rc = 0;
-	pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };
+	pf[0] = (struct rte_ipsec_sa_pkt_func) { {NULL}, NULL };
 
 	switch (ss->type) {
 	case RTE_SECURITY_ACTION_TYPE_NONE:
@@ -677,9 +750,12 @@ ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
 			pf->process = inline_proto_outb_pkt_process;
 		break;
 	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
-		pf->prepare = lksd_proto_prepare;
+		pf->prepare.async = lksd_proto_prepare;
 		pf->process = pkt_flag_process;
 		break;
+	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+		rc = cpu_crypto_pkt_func_select(sa, pf);
+		break;
 	default:
 		rc = -ENOTSUP;
 	}
diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
index 51e69ad05..d22451b38 100644
--- a/lib/librte_ipsec/sa.h
+++ b/lib/librte_ipsec/sa.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
  */
 
 #ifndef _SA_H_
@@ -88,6 +88,8 @@ struct rte_ipsec_sa {
 		union sym_op_ofslen cipher;
 		union sym_op_ofslen auth;
 	} ctp;
+	/* cpu-crypto offsets */
+	union rte_crypto_sym_ofs cofs;
 	/* tx_offload template for tunnel mbuf */
 	struct {
 		uint64_t msk;
@@ -156,6 +158,10 @@ uint16_t
 inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num);
 
+uint16_t
+cpu_inb_pkt_prepare(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num);
+
 /* outbound processing */
 
 uint16_t
@@ -170,6 +176,10 @@ uint16_t
 esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	uint16_t num);
 
+uint16_t
+pkt_flag_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num);
+
 uint16_t
 inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num);
@@ -182,4 +192,11 @@ uint16_t
 inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num);
 
+uint16_t
+cpu_outb_tun_pkt_prepare(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num);
+uint16_t
+cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,
+		struct rte_mbuf *mb[], uint16_t num);
+
 #endif /* _SA_H_ */
diff --git a/lib/librte_ipsec/ses.c b/lib/librte_ipsec/ses.c
index 82c765a33..3d51ac498 100644
--- a/lib/librte_ipsec/ses.c
+++ b/lib/librte_ipsec/ses.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
  */
 
 #include <rte_ipsec.h>
@@ -11,7 +11,8 @@ session_check(struct rte_ipsec_session *ss)
 	if (ss == NULL || ss->sa == NULL)
 		return -EINVAL;
 
-	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
+		ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
 		if (ss->crypto.ses == NULL)
 			return -EINVAL;
 	} else {
-- 
2.17.1


Thread overview: 77+ messages
2020-01-15 18:28 [dpdk-dev] [PATCH v3 0/6] Introduce CPU crypto mode Marcin Smoczynski
2020-01-15 18:28 ` [dpdk-dev] [PATCH v3 1/6] cryptodev: introduce cpu crypto support API Marcin Smoczynski
2020-01-15 23:20   ` Ananyev, Konstantin
2020-01-16 10:11   ` Zhang, Roy Fan
2020-01-15 18:28 ` [dpdk-dev] [PATCH v3 2/6] crypto/aesni_gcm: cpu crypto support Marcin Smoczynski
2020-01-15 23:16   ` Ananyev, Konstantin
2020-01-16 10:00   ` Zhang, Roy Fan
2020-01-21 13:53   ` De Lara Guarch, Pablo
2020-01-21 14:29     ` Ananyev, Konstantin
2020-01-21 14:51       ` De Lara Guarch, Pablo
2020-01-21 15:23         ` Ananyev, Konstantin
2020-01-21 22:33           ` De Lara Guarch, Pablo
2020-01-22 12:43             ` Ananyev, Konstantin
2020-01-15 18:28 ` [dpdk-dev] [PATCH v3 3/6] security: add cpu crypto action type Marcin Smoczynski
2020-01-15 22:49   ` Ananyev, Konstantin
2020-01-16 10:01   ` Zhang, Roy Fan
2020-01-15 18:28 ` [dpdk-dev] [PATCH v3 4/6] ipsec: introduce support for cpu crypto mode Marcin Smoczynski
2020-01-16 10:53   ` Zhang, Roy Fan
2020-01-16 10:53   ` Zhang, Roy Fan
2020-01-15 18:28 ` [dpdk-dev] [PATCH v3 5/6] examples/ipsec-secgw: cpu crypto support Marcin Smoczynski
2020-01-16 10:54   ` Zhang, Roy Fan
2020-01-15 18:28 ` [dpdk-dev] [PATCH v3 6/6] examples/ipsec-secgw: cpu crypto testing Marcin Smoczynski
2020-01-16 10:54   ` Zhang, Roy Fan
2020-01-28  3:16 ` [dpdk-dev] [PATCH v4 0/8] Introduce CPU crypto mode Marcin Smoczynski
2020-01-28  3:16   ` [dpdk-dev] [PATCH v4 1/8] cryptodev: introduce cpu crypto support API Marcin Smoczynski
2020-01-28  3:16   ` [dpdk-dev] [PATCH v4 2/8] crypto/aesni_gcm: cpu crypto support Marcin Smoczynski
2020-01-28 10:49     ` De Lara Guarch, Pablo
2020-01-28  3:16   ` [dpdk-dev] [PATCH v4 3/8] test/crypto: add CPU crypto tests Marcin Smoczynski
2020-01-28  9:31     ` De Lara Guarch, Pablo
2020-01-28 10:51       ` De Lara Guarch, Pablo
2020-01-28  3:16   ` [dpdk-dev] [PATCH v4 4/8] security: add cpu crypto action type Marcin Smoczynski
2020-01-28 11:00     ` Ananyev, Konstantin
2020-01-28  3:16   ` [dpdk-dev] [PATCH v4 5/8] ipsec: introduce support for cpu crypto mode Marcin Smoczynski
2020-01-28  3:16   ` [dpdk-dev] [PATCH v4 6/8] examples/ipsec-secgw: cpu crypto support Marcin Smoczynski
2020-01-28  3:16   ` [dpdk-dev] [PATCH v4 7/8] examples/ipsec-secgw: cpu crypto testing Marcin Smoczynski
2020-01-28  3:16   ` [dpdk-dev] [PATCH v4 8/8] doc: add cpu crypto related documentation Marcin Smoczynski
2020-01-28 14:22   ` [dpdk-dev] [PATCH v5 0/8] Introduce CPU crypto mode Marcin Smoczynski
2020-01-28 14:22     ` [dpdk-dev] [PATCH v5 1/8] cryptodev: introduce cpu crypto support API Marcin Smoczynski
2020-01-31 14:30       ` Akhil Goyal
2020-01-28 14:22     ` [dpdk-dev] [PATCH v5 2/8] crypto/aesni_gcm: cpu crypto support Marcin Smoczynski
2020-01-28 16:39       ` Ananyev, Konstantin
2020-01-31 14:33       ` Akhil Goyal
2020-01-28 14:22     ` [dpdk-dev] [PATCH v5 3/8] test/crypto: add CPU crypto tests Marcin Smoczynski
2020-01-31 14:37       ` Akhil Goyal
2020-01-28 14:22     ` [dpdk-dev] [PATCH v5 4/8] security: add cpu crypto action type Marcin Smoczynski
2020-01-31 14:26       ` Akhil Goyal
2020-02-04 10:36         ` Akhil Goyal
2020-02-04 10:43           ` Ananyev, Konstantin
2020-01-28 14:22     ` Marcin Smoczynski [this message]
2020-01-28 16:37       ` [dpdk-dev] [PATCH v5 5/8] ipsec: introduce support for cpu crypto mode Ananyev, Konstantin
2020-01-28 14:22     ` [dpdk-dev] [PATCH v5 6/8] examples/ipsec-secgw: cpu crypto support Marcin Smoczynski
2020-01-28 14:22     ` [dpdk-dev] [PATCH v5 7/8] examples/ipsec-secgw: cpu crypto testing Marcin Smoczynski
2020-01-28 14:22     ` [dpdk-dev] [PATCH v5 8/8] doc: add cpu crypto related documentation Marcin Smoczynski
2020-01-31 14:43       ` Akhil Goyal
2020-02-04 13:12     ` [dpdk-dev] [PATCH v6 0/8] Introduce CPU crypto mode Marcin Smoczynski
2020-02-04 13:12       ` [dpdk-dev] [PATCH v6 1/8] cryptodev: introduce cpu crypto support API Marcin Smoczynski
2020-02-05 14:57         ` Akhil Goyal
2020-02-06  0:48         ` Thomas Monjalon
2020-02-06 12:36         ` [dpdk-dev] [PATCH] cryptodev: fix missing doxygen comment Marcin Smoczynski
2020-02-06 12:43           ` Ananyev, Konstantin
2020-02-12 13:15             ` Akhil Goyal
2020-02-04 13:12       ` [dpdk-dev] [PATCH v6 2/8] crypto/aesni_gcm: cpu crypto support Marcin Smoczynski
2020-02-04 13:12       ` [dpdk-dev] [PATCH v6 3/8] security: add cpu crypto action type Marcin Smoczynski
2020-02-05 14:58         ` Akhil Goyal
2020-02-04 13:12       ` [dpdk-dev] [PATCH v6 4/8] test/crypto: add cpu crypto mode to tests Marcin Smoczynski
2020-02-05 14:59         ` Akhil Goyal
2020-02-07 14:28         ` [dpdk-dev] [PATCH] test/crypto: add cpu crypto mode tests Marcin Smoczynski
2020-02-07 17:04           ` Ananyev, Konstantin
2020-02-13  9:14             ` Akhil Goyal
2020-02-13  9:29               ` Akhil Goyal
2020-02-04 13:12       ` [dpdk-dev] [PATCH v6 5/8] ipsec: introduce support for cpu crypto mode Marcin Smoczynski
2020-02-05 14:59         ` Akhil Goyal
2020-02-04 13:12       ` [dpdk-dev] [PATCH v6 6/8] examples/ipsec-secgw: cpu crypto support Marcin Smoczynski
2020-02-05 15:00         ` Akhil Goyal
2020-02-04 13:12       ` [dpdk-dev] [PATCH v6 7/8] examples/ipsec-secgw: cpu crypto testing Marcin Smoczynski
2020-02-04 13:12       ` [dpdk-dev] [PATCH v6 8/8] doc: add release notes for cpu crypto Marcin Smoczynski
2020-02-05 15:03       ` [dpdk-dev] [PATCH v6 0/8] Introduce CPU crypto mode Akhil Goyal
