From: Fan Zhang <roy.fan.zhang@intel.com>
To: dev@dpdk.org
Cc: fiona.trahe@intel.com, akhil.goyal@nxp.com, thomas@monjalon.net,
	jerinjacobk@gmail.com, Fan Zhang <roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct data-path APIs
Date: Fri,  3 Jul 2020 11:12:01 +0100
Message-ID: <20200703101203.23003-2-roy.fan.zhang@intel.com>
In-Reply-To: <20200703101203.23003-1-roy.fan.zhang@intel.com>

This patch adds symmetric crypto data-path API support to the QAT-SYM PMD.
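
For reference, a minimal application-side sketch of how the direct
data-path ops may be used (not part of the patch; it relies on the
rte_crypto_hw_ops / rte_crypto_sym_job definitions and the enqueue/dequeue
flags added by the cryptodev patch of this series, while the getter name
rte_cryptodev_sym_get_hw_ops() and the dev_id, qp_id, session, mbuf,
iv_virt_addr, iv_iova and data_len variables are assumptions for
illustration only):

	struct rte_crypto_hw_ops hw_ops;
	struct rte_crypto_sym_job job = { 0 };
	uint64_t drv_data;
	int status;
	void *done;

	/* Fetch the per-queue-pair function pointers that this patch
	 * fills in via qat_sym_get_ops(); the getter name is assumed.
	 */
	rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops);

	/* Describe one linear (non-SGL) cipher-only job. */
	job.data_iova = rte_pktmbuf_iova(mbuf);
	job.iv = iv_virt_addr;
	job.iv_iova = iv_iova;
	job.cipher_only.cipher_ofs = 0;
	job.cipher_only.cipher_len = data_len;

	/* Single-job burst: START loads the session template and stores
	 * the opaque pointer, END writes the queue tail CSR at once.
	 */
	hw_ops.enqueue_cipher(hw_ops.qp, session, &job, mbuf, &drv_data,
			RTE_CRYPTO_HW_ENQ_FLAG_START |
			RTE_CRYPTO_HW_ENQ_FLAG_END);

	/* Poll the response ring, then pull the completed job back. */
	while (hw_ops.query_processed(hw_ops.qp, 1) == 0)
		;
	done = hw_ops.dequeue_one(hw_ops.qp, &drv_data,
			RTE_CRYPTO_HW_DEQ_FLAG_START, &status);
	/* done == mbuf on success; status is 1 (ok) or -1 (device error). */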

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/common/qat/Makefile      |   2 +
 drivers/common/qat/qat_qp.c      |   4 +-
 drivers/common/qat/qat_qp.h      |   3 +
 drivers/crypto/qat/meson.build   |   1 +
 drivers/crypto/qat/qat_sym.c     |   1 -
 drivers/crypto/qat/qat_sym_job.c | 661 +++++++++++++++++++++++++++++++
 drivers/crypto/qat/qat_sym_job.h |  12 +
 drivers/crypto/qat/qat_sym_pmd.c |   7 +-
 8 files changed, 686 insertions(+), 5 deletions(-)
 create mode 100644 drivers/crypto/qat/qat_sym_job.c
 create mode 100644 drivers/crypto/qat/qat_sym_job.h

diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 28bd5668f..6655fd2bc 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -39,6 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
 	SRCS-y += qat_sym.c
 	SRCS-y += qat_sym_session.c
 	SRCS-y += qat_sym_pmd.c
+	SRCS-y += qat_sym_job.c
+
 	build_qat = yes
 endif
 endif
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 8e6dd04eb..06e2d8c8a 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,8 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
 	q->csr_tail = q->tail;
 }
 
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
 {
 	uint32_t old_head, new_head;
 	uint32_t max_head;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 575d69059..8add1b049 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -116,4 +116,7 @@ qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 			  void *op_cookie __rte_unused,
 			  uint64_t *dequeue_err_count __rte_unused);
 
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q);
+
 #endif /* _QAT_QP_H_ */
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index fc65923a7..8a3921293 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -13,6 +13,7 @@ if dep.found()
 	qat_sources += files('qat_sym_pmd.c',
 			     'qat_sym.c',
 			     'qat_sym_session.c',
+			     'qat_sym_job.c',
 			     'qat_asym_pmd.c',
 			     'qat_asym.c')
 	qat_ext_deps += dep
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 25b6dd5f4..609180d3f 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -336,7 +336,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 			set_cipher_iv(ctx->cipher_iv.length,
 					ctx->cipher_iv.offset,
 					cipher_param, op, qat_req);
-
 		} else if (ctx->qat_hash_alg ==
 				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
 
diff --git a/drivers/crypto/qat/qat_sym_job.c b/drivers/crypto/qat/qat_sym_job.c
new file mode 100644
index 000000000..7c0913459
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.c
@@ -0,0 +1,661 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym.h"
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_qp.h"
+#include "qat_sym_job.h"
+
+static __rte_always_inline int
+qat_sym_frame_fill_sgl(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
+		struct rte_crypto_sgl *sgl, uint32_t max_len)
+{
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sgl *list;
+	int64_t len = max_len;
+	uint32_t i;
+
+	if (!sgl)
+		return -EINVAL;
+	if (sgl->num < 2 || sgl->num > QAT_SYM_SGL_MAX_NUMBER || !sgl->vec)
+		return -EINVAL;
+
+	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+	cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
+	list = (struct qat_sgl *)&cookie->qat_sgl_src;
+
+	for (i = 0; i < sgl->num && len > 0; i++) {
+		list->buffers[i].len = RTE_MIN(sgl->vec[i].len, len);
+		list->buffers[i].resrvd = 0;
+		list->buffers[i].addr = sgl->vec[i].iova;
+		len -= list->buffers[i].len;
+	}
+
+	if (unlikely(len > 0))
+		return -1;
+
+	list->num_bufs = i;
+	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+			cookie->qat_sgl_src_phys_addr;
+	req->comn_mid.src_length = req->comn_mid.dst_length = 0;
+	return 0;
+}
+
+static __rte_always_inline void
+qat_sym_set_cipher_param(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		uint32_t cipher_ofs, uint32_t cipher_len)
+{
+	cipher_param->cipher_offset = cipher_ofs;
+	cipher_param->cipher_length = cipher_len;
+}
+
+static __rte_always_inline void
+qat_sym_set_auth_param(struct icp_qat_fw_la_auth_req_params *auth_param,
+		uint32_t auth_ofs, uint32_t auth_len,
+		rte_iova_t digest_iova, rte_iova_t aad_iova)
+{
+	auth_param->auth_off = auth_ofs;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest_iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_aead(void *qat_sym_qp,
+	struct rte_cryptodev_sym_session *session,
+	struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+	uint64_t flags)
+{
+	struct qat_qp *qp = qat_sym_qp;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx;
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	uint32_t t;
+	/* In case of AES-CCM this may point to user-selected
+	 * memory or to the IV offset in the crypto op.
+	 */
+	uint8_t *aad_data;
+	/* This is the true AAD length; it does not include the
+	 * 18 bytes of preceding data.
+	 */
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova;
+	uint8_t q;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+		session, cryptodev_qat_driver_id);
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+		t = tx_queue->tail;
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + t);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+		req->comn_mid.opaque_data = (uintptr_t)opaque;
+	} else {
+		t = (uint32_t)*drv_data;
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + t);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	}
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+			job->data_iova;
+	req->comn_mid.src_length = req->comn_mid.dst_length =
+			job->aead.aead_ofs + job->aead.aead_len;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+				job->iv, ctx->cipher_iv.length);
+		aad_iova = job->aead.aad_iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = job->aead.aad;
+		aad_iova = job->aead.aad_iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32(job->aead.aead_len);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+					ICP_QAT_HW_CCM_AAD_B0_LEN -
+					ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = job->iv;
+			aad_iova = job->iv_iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+				ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+			    ICP_QAT_HW_CCM_NONCE_OFFSET +
+			    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+			    (uint8_t *)&msg_len_be,
+			    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+			    ICP_QAT_HW_CCM_NONCE_OFFSET,
+			    (uint8_t *)&msg_len_be
+			    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+			    - q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+					rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+					% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+				((aad_ccm_real_len + aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+				    aad_ccm_real_len + aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+
+			rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+					+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+					job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+					ctx->cipher_iv.length);
+			*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+				q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+			if (aad_len_field_sz)
+				rte_memcpy(job->aead.aad +
+					ICP_QAT_HW_CCM_NONCE_OFFSET,
+					job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+					ctx->cipher_iv.length);
+
+		}
+		break;
+	default:
+		return -1;
+	}
+
+	qat_sym_set_cipher_param(cipher_param, job->aead.aead_ofs,
+			job->aead.aead_len);
+	qat_sym_set_auth_param(auth_param, job->aead.aead_ofs,
+			job->aead.aead_len, job->aead.tag_iova, aad_iova);
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+		int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+				job->aead.aead_ofs + job->aead.aead_len);
+		if (unlikely(ret < 0))
+			return -1;
+	}
+
+	if (ctx->is_single_pass) {
+		cipher_param->spc_aad_addr = aad_iova;
+		cipher_param->spc_auth_res_addr = job->aead.tag_iova;
+	}
+
+	qp->enqueued++;
+	qp->stats.enqueued_count++;
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+		tx_queue->tail = (t + tx_queue->msg_size) &
+				tx_queue->modulo_mask;
+		WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number,
+			tx_queue->tail);
+		tx_queue->csr_tail = tx_queue->tail;
+	} else
+		*drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	return 0;
+}
+
+
+static __rte_always_inline int
+qat_sym_job_enqueue_cipher(void *qat_sym_qp,
+	struct rte_cryptodev_sym_session *session,
+	struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+	uint64_t flags)
+{
+	struct qat_qp *qp = qat_sym_qp;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx;
+	struct icp_qat_fw_la_bulk_req *req;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t t;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session, cryptodev_qat_driver_id);
+	if (unlikely(ctx->bpi_ctx)) {
+		QAT_DP_LOG(ERR, "DOCSIS is not supported");
+		return -1;
+	}
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+		t = tx_queue->tail;
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + t);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+		req->comn_mid.opaque_data = (uintptr_t)opaque;
+	} else {
+		t = (uint32_t)*drv_data;
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + t);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	}
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+			job->data_iova;
+	req->comn_mid.src_length = req->comn_mid.dst_length =
+			job->cipher_only.cipher_ofs +
+			job->cipher_only.cipher_len;
+
+	/* cipher IV */
+	rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+			job->iv, ctx->cipher_iv.length);
+	qat_sym_set_cipher_param(cipher_param, job->cipher_only.cipher_ofs,
+			job->cipher_only.cipher_len);
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+		int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+				job->cipher_only.cipher_ofs +
+				job->cipher_only.cipher_len);
+		if (unlikely(ret < 0))
+			return -1;
+	}
+
+	qp->enqueued++;
+	qp->stats.enqueued_count++;
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+		tx_queue->tail = (t + tx_queue->msg_size) &
+				tx_queue->modulo_mask;
+		WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number,
+			tx_queue->tail);
+		tx_queue->csr_tail = tx_queue->tail;
+	} else
+		*drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_auth(void *qat_sym_qp,
+	struct rte_cryptodev_sym_session *session,
+	struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+	uint64_t flags)
+{
+	struct qat_qp *qp = qat_sym_qp;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx;
+	struct icp_qat_fw_la_bulk_req *req;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	uint32_t t;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session, cryptodev_qat_driver_id);
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+		t = tx_queue->tail;
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + t);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+		req->comn_mid.opaque_data = (uintptr_t)opaque;
+	} else {
+		t = (uint32_t)*drv_data;
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + t);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	}
+
+	auth_param = (void *)((uint8_t *)&req->serv_specif_rqpars +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+			job->data_iova;
+	req->comn_mid.src_length = req->comn_mid.dst_length =
+			job->auth_only.auth_ofs + job->auth_only.auth_len;
+
+	/* auth */
+	qat_sym_set_auth_param(auth_param, job->auth_only.auth_ofs,
+		job->auth_only.auth_len, job->auth_only.digest_iova, 0);
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = job->iv_iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+		return -1;
+	default:
+		break;
+	}
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+		int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+				job->auth_only.auth_ofs +
+				job->auth_only.auth_len);
+		if (unlikely(ret < 0))
+			return -1;
+	}
+
+	qp->enqueued++;
+	qp->stats.enqueued_count++;
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+		tx_queue->tail = (t + tx_queue->msg_size) &
+				tx_queue->modulo_mask;
+		WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number,
+			tx_queue->tail);
+		tx_queue->csr_tail = tx_queue->tail;
+	} else
+		*drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_chain(void *qat_sym_qp,
+	struct rte_cryptodev_sym_session *session,
+	struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+	uint64_t flags)
+{
+	struct qat_qp *qp = qat_sym_qp;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx;
+	struct icp_qat_fw_la_bulk_req *req;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	uint32_t min_ofs = RTE_MIN(job->chain.cipher_ofs, job->chain.auth_ofs);
+	uint32_t max_len = RTE_MAX(job->chain.cipher_len, job->chain.auth_len);
+	rte_iova_t auth_iova_end;
+	uint32_t t;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session, cryptodev_qat_driver_id);
+	if (unlikely(ctx->bpi_ctx)) {
+		QAT_DP_LOG(ERR, "DOCSIS is not supported");
+		return -1;
+	}
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+		t = tx_queue->tail;
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + t);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+		req->comn_mid.opaque_data = (uintptr_t)opaque;
+	} else {
+		t = (uint32_t)*drv_data;
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + t);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	}
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	req->comn_mid.src_data_addr =
+		req->comn_mid.dest_data_addr = job->data_iova;
+	req->comn_mid.src_length = req->comn_mid.dst_length = min_ofs + max_len;
+
+	/* cipher IV */
+	rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+			job->iv, ctx->cipher_iv.length);
+	qat_sym_set_cipher_param(cipher_param, job->chain.cipher_ofs,
+			job->chain.cipher_len);
+
+	/* auth */
+	qat_sym_set_auth_param(auth_param, job->chain.auth_ofs,
+			job->chain.auth_len, job->chain.digest_iova, 0);
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = job->iv_iova;
+
+		if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+			uint32_t len = job->chain.auth_ofs +
+					job->chain.auth_len;
+			struct rte_crypto_vec *vec = job->sgl->vec;
+			int auth_end_get = 0;
+			while (len) {
+				if (len <= vec->len) {
+					auth_iova_end = vec->iova + len;
+					auth_end_get = 1;
+					break;
+				}
+				len -= vec->len;
+				vec++;
+			}
+			if (!auth_end_get) {
+				QAT_DP_LOG(ERR, "Failed to get auth end");
+				return -1;
+			}
+		} else
+			auth_iova_end = job->data_iova + job->chain.auth_ofs +
+				job->chain.auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_param->auth_off + auth_param->auth_len <
+				cipher_param->cipher_offset +
+				cipher_param->cipher_length) &&
+				(job->chain.digest_iova == auth_iova_end)) {
+			/* Handle partial digest encryption */
+			if (cipher_param->cipher_offset +
+					cipher_param->cipher_length <
+					auth_param->auth_off +
+					auth_param->auth_len +
+					ctx->digest_length)
+				req->comn_mid.dst_length =
+					req->comn_mid.src_length =
+					auth_param->auth_off +
+					auth_param->auth_len +
+					ctx->digest_length;
+			struct icp_qat_fw_comn_req_hdr *header =
+				&req->comn_hdr;
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+		}
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+		return -1;
+	default:
+		break;
+	}
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+		int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+				min_ofs + max_len);
+		if (unlikely(ret < 0))
+			return -1;
+	}
+
+	qp->enqueued++;
+	qp->stats.enqueued_count++;
+
+	if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+		tx_queue->tail = (t + tx_queue->msg_size) &
+				tx_queue->modulo_mask;
+		WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+			tx_queue->hw_bundle_number,
+			tx_queue->hw_queue_number,
+			tx_queue->tail);
+		tx_queue->csr_tail = tx_queue->tail;
+	} else
+		*drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	return 0;
+}
+
+#define get_rx_queue_message_at_index(q, h, i) \
+	(void *)((uint8_t *)q->base_addr + ((h + q->msg_size * (i)) & \
+	q->modulo_mask))
+
+static __rte_always_inline int
+qat_is_rx_msg_ok(struct icp_qat_fw_comn_resp *resp_msg)
+{
+	return ICP_QAT_FW_COMN_STATUS_FLAG_OK ==
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+					resp_msg->comn_hdr.comn_status);
+}
+
+static __rte_always_inline int
+qat_sym_query_processed_jobs(void *qat_sym_qp, uint32_t nb_jobs)
+{
+	struct qat_qp *qp = qat_sym_qp;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	uint32_t head = (rx_queue->head + (nb_jobs - 1) * rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+
+	resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+	if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG)
+		return 0;
+
+	return 1;
+}
+
+static __rte_always_inline void *
+qat_sym_job_dequeue_one(void *qat_sym_qp, uint64_t *drv_data, uint64_t flags,
+		int *is_op_success)
+{
+	struct qat_qp *qp = qat_sym_qp;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	uint32_t head;
+	void *opaque;
+
+	if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+		head = rx_queue->head;
+	else
+		head = (uint32_t)*drv_data;
+
+	resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+
+	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+		*is_op_success = 0;
+		return NULL;
+	}
+
+	if (unlikely(qat_is_rx_msg_ok(resp) == 0))
+		*is_op_success = -1;
+	else
+		*is_op_success = 1;
+
+	opaque = (void *)(uintptr_t)resp->opaque_data;
+
+	rx_queue->head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+	rx_queue->nb_processed_responses++;
+	qp->dequeued++;
+	qp->stats.dequeued_count++;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+		rxq_free_desc(qp, rx_queue);
+
+	return opaque;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_job_dequeue_n(void *qat_sym_qp, uint64_t *drv_data,
+		void *user_data, rte_crpyto_hw_user_post_deq_cb_fn cb,
+		uint32_t n, uint64_t flags, uint32_t *n_failed_jobs)
+{
+	struct qat_qp *qp = qat_sym_qp;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp;
+	uint32_t head, i;
+	uint32_t status, total_fail = 0;
+
+	if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+		head = rx_queue->head;
+	else
+		head = (uint32_t)*drv_data;
+
+	for (i = 0; i < n; i++) {
+		resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+
+		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+			if (flags & RTE_CRYPTO_HW_DEQ_FLAG_EXHAUST)
+				break;
+			return -i;
+		}
+
+		status = qat_is_rx_msg_ok(resp);
+		total_fail += !status; /* count failed responses */
+		cb(user_data, i, status);
+
+		head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+	}
+
+	rx_queue->head = head;
+	rx_queue->nb_processed_responses += i;
+	qp->dequeued += i;
+	qp->stats.dequeued_count += i;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+		rxq_free_desc(qp, rx_queue);
+	*n_failed_jobs = total_fail;
+
+	return i;
+}
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+		uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops)
+{
+	struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+
+	if (qp->service_type != QAT_SERVICE_SYMMETRIC)
+		return -EINVAL;
+
+	hw_ops->qp = (void *)qp;
+	hw_ops->enqueue_aead = qat_sym_job_enqueue_aead;
+	hw_ops->enqueue_cipher = qat_sym_job_enqueue_cipher;
+	hw_ops->enqueue_auth = qat_sym_job_enqueue_auth;
+	hw_ops->enqueue_chain = qat_sym_job_enqueue_chain;
+	hw_ops->dequeue_one = qat_sym_job_dequeue_one;
+	hw_ops->dequeue_many = qat_sym_job_dequeue_n;
+	hw_ops->query_processed = qat_sym_query_processed_jobs;
+
+	return 0;
+}
diff --git a/drivers/crypto/qat/qat_sym_job.h b/drivers/crypto/qat/qat_sym_job.h
new file mode 100644
index 000000000..b11aeb841
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_FRAME_H_
+#define _QAT_SYM_FRAME_H_
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+		uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops);
+
+#endif /* _QAT_SYM_FRAME_H_ */
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index e887c880f..be9d73c0a 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -13,6 +13,7 @@
 #include "qat_sym.h"
 #include "qat_sym_session.h"
 #include "qat_sym_pmd.h"
+#include "qat_sym_job.h"
 
 #define MIXED_CRYPTO_MIN_FW_VER 0x04090000
 
@@ -234,7 +235,8 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 		/* Crypto related operations */
 		.sym_session_get_size	= qat_sym_session_get_private_size,
 		.sym_session_configure	= qat_sym_session_configure,
-		.sym_session_clear	= qat_sym_session_clear
+		.sym_session_clear	= qat_sym_session_clear,
+		.sym_get_hw_ops		= qat_sym_get_ops,
 };
 
 static uint16_t
@@ -308,7 +310,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
-			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
+			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
+			RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API;
 
 	internals = cryptodev->data->dev_private;
 	internals->qat_dev = qat_pci_dev;
-- 
2.20.1

