From: Kai Ji <kai.ji@intel.com>
To: dev@dpdk.org
Cc: Kai Ji <kai.ji@intel.com>
Subject: [dpdk-dev v5 08/10] crypto/qat: op burst data path rework
Date: Fri, 28 Jan 2022 18:23:12 +0000
Message-ID: <20220128182314.23471-9-kai.ji@intel.com>
In-Reply-To: <20220128182314.23471-1-kai.ji@intel.com>

This patch enables the op_build_request function in qat_enqueue_op_burst
and the qat_dequeue_process_response function in qat_dequeue_op_burst.
The op_build_request callback invoked when building a crypto request is
chosen according to the crypto operations set up during session init.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/common/qat/qat_qp.c               |  42 +-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c |   4 -
 drivers/crypto/qat/qat_asym.c             |   2 +-
 drivers/crypto/qat/qat_asym.h             |  22 -
 drivers/crypto/qat/qat_sym.c              | 829 +++++++---------------
 drivers/crypto/qat/qat_sym.h              |   5 -
 6 files changed, 270 insertions(+), 634 deletions(-)
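
For reviewers, a minimal sketch (not part of the patch) of the reworked
enqueue loop, using the names from the qat_qp.c hunk below; the capacity
checks and statistics handling of the full function are elided, and the
tail-advance line is an assumption based on the surrounding code:

	/* Sketch only: one service-supplied callback replaces the old
	 * if/else chain on tmp_qp->service_type. The opaque array is
	 * re-armed with an all-ones pattern once per burst so the first
	 * op always refreshes the cached session data (see qat_sym.c).
	 */
	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
	while (nb_ops_sent != nb_ops_possible) {
		ret = op_build_request(*ops, base_addr + tail,
				tmp_qp->op_cookies[tail >> queue->trailz],
				tmp_qp->opaque, tmp_qp->qat_dev_gen);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			break;
		}
		tail = adf_modulo(tail + queue->msg_size,
				queue->modulo_mask); /* assumed advance */
		ops++;
		nb_ops_sent++;
	}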

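The per-queue-pair opaque[] pair acts as a one-entry cache for the hot
path in the new qat_sym_build_request(): opaque[0] holds the last-seen
session pointer and opaque[1] the matching per-process build callback.
A simplified sketch of the with-session path only follows (security
sessions, the generation check and the set_session fallback in the diff
are omitted; sym_build_request_sketch is a hypothetical name):

	static int
	sym_build_request_sketch(struct rte_crypto_op *op, uint8_t *out_msg,
			void *op_cookie, uint64_t *opaque)
	{
		/* The 0xff fill written once per burst never matches a
		 * real pointer, so the first op of every burst takes the
		 * refresh branch; subsequent ops on the same session cost
		 * one compare plus an indirect call.
		 */
		void *sess = (void *)(uintptr_t)opaque[0];
		qat_sym_build_request_t build_request =
				(void *)(uintptr_t)opaque[1];
		struct qat_sym_session *ctx;

		ctx = get_sym_session_private_data(op->sym->session,
				qat_sym_driver_id);
		if (unlikely(ctx == NULL))
			return -EINVAL;
		if (sess != (void *)ctx) {
			build_request =
				ctx->build_request[rte_eal_process_type()];
			opaque[0] = (uintptr_t)ctx;
			opaque[1] = (uintptr_t)build_request;
		}
		return build_request(op, (void *)ctx, out_msg, op_cookie);
	}
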
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index dd9056650d..9bbadc8f8e 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -550,8 +550,7 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
 }
 
 uint16_t
-qat_enqueue_op_burst(void *qp,
-		__rte_unused qat_op_build_request_t op_build_request,
+qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
 		void **ops, uint16_t nb_ops)
 {
 	register struct qat_queue *queue;
@@ -602,29 +601,18 @@ qat_enqueue_op_burst(void *qp,
 		}
 	}
 
-#ifdef BUILD_QAT_SYM
+#ifdef RTE_LIB_SECURITY
 	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
 		qat_sym_preprocess_requests(ops, nb_ops_possible);
 #endif
 
+	memset(tmp_qp->opaque, 0xff, sizeof(tmp_qp->opaque));
+
 	while (nb_ops_sent != nb_ops_possible) {
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
-#ifdef BUILD_QAT_SYM
-			ret = qat_sym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-#endif
-		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
-			ret = qat_comp_build_request(*ops, base_addr + tail,
+		ret = op_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
-				tmp_qp->qat_dev_gen);
-		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
-#ifdef BUILD_QAT_ASYM
-			ret = qat_asym_build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail >> queue->trailz],
-				NULL, tmp_qp->qat_dev_gen);
-#endif
-		}
+				tmp_qp->opaque, tmp_qp->qat_dev_gen);
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -820,8 +808,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 uint16_t
 qat_dequeue_op_burst(void *qp, void **ops,
-		__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
-		uint16_t nb_ops)
+		qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
 {
 	struct qat_queue *rx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
@@ -839,21 +826,10 @@ qat_dequeue_op_burst(void *qp, void **ops,
 
 		nb_fw_responses = 1;
 
-		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
-			nb_fw_responses = qat_comp_process_response(
+		nb_fw_responses = qat_dequeue_process_response(
 				ops, resp_msg,
 				tmp_qp->op_cookies[head >> rx_queue->trailz],
 				&tmp_qp->stats.dequeue_err_count);
-#ifdef BUILD_QAT_ASYM
-		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
-			qat_asym_process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head >> rx_queue->trailz],
-				NULL);
-#endif
 
 		head = adf_modulo(head + rx_queue->msg_size,
 				  rx_queue->modulo_mask);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 501132a448..c58a628915 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -146,10 +146,6 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 	.sym_session_get_size	= qat_sym_session_get_private_size,
 	.sym_session_configure	= qat_sym_session_configure,
 	.sym_session_clear	= qat_sym_session_clear,
-
-	/* Raw data-path API related operations */
-	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
-	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
 };
 
 static struct qat_capabilities_info
diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index da8d7e965c..07e3baa172 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -773,7 +773,7 @@ qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
 	return 0;
 }
 
-int
+static __rte_always_inline int
 qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 		__rte_unused uint64_t *opaque,
 		__rte_unused enum qat_device_gen dev_gen)
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index aba49d57cb..72e62120c5 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -104,28 +104,6 @@ void
 qat_asym_session_clear(struct rte_cryptodev *dev,
 		struct rte_cryptodev_asym_session *sess);
 
-/*
- * Build PKE request to be sent to the fw, partially uses template
- * request generated during session creation.
- *
- * @param	in_op		Pointer to the crypto operation, for every
- *				service it points to service specific struct.
- * @param	out_msg		Message to be returned to enqueue function
- * @param	op_cookie	Cookie pointer that holds private metadata
- * @param	qat_dev_gen	Generation of QAT hardware
- *
- * @return
- *	This function always returns zero,
- *	it is because of backward compatibility.
- *	- 0: Always returned
- *
- */
-int
-qat_asym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie,
-		__rte_unused uint64_t *opaque,
-		enum qat_device_gen qat_dev_gen);
-
 /*
  * Process PKE response received from outgoing queue of QAT
  *
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index aad4b243b7..0b1ab0b000 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -11,12 +11,25 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-#include "dev/qat_crypto_pmd_gens.h"
+#include "qat_crypto.h"
+#include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
+/* An rte_driver is needed in the registration of both the device and the driver
+ * with cryptodev.
+ * The actual qat pci's rte_driver can't be used as its name represents
+ * the whole pci device with all services. Think of this as a holder for a name
+ * for the crypto part of the pci device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
 void
 qat_sym_init_op_cookie(void *op_cookie)
 {
@@ -38,160 +51,67 @@ qat_sym_init_op_cookie(void *op_cookie)
 			opt.spc_gmac.cd_cipher);
 }
 
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op,
-		struct icp_qat_fw_la_bulk_req *qat_req)
+static __rte_always_inline int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen)
 {
-	/* copy IV into request if it fits */
-	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
-		rte_memcpy(cipher_param->u.cipher_IV_array,
-				rte_crypto_op_ctod_offset(op, uint8_t *,
-					iv_offset),
-				iv_length);
-	} else {
-		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-				qat_req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-		cipher_param->u.s.cipher_IV_ptr =
-				rte_crypto_op_ctophys_offset(op,
-					iv_offset);
-	}
-}
+	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+	void *sess = (void *)opaque[0];
+	qat_sym_build_request_t build_request = (void *)opaque[1];
+	struct qat_sym_session *ctx = NULL;
 
-/** Set IV for CCM is special case, 0th byte is set to q-1
- *  where q is padding of nonce in 16 byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
-		struct icp_qat_fw_la_cipher_req_params *cipher_param,
-		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
-	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
-			ICP_QAT_HW_CCM_NONCE_OFFSET,
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-	if (aad_len_field_sz)
-		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
-			rte_crypto_op_ctod_offset(op, uint8_t *,
-				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			iv_length);
-}
+	if (likely(op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)) {
+		ctx = get_sym_session_private_data(op->sym->session,
+				qat_sym_driver_id);
+		if (unlikely(!ctx)) {
+			QAT_DP_LOG(ERR, "No session for this device");
+			return -EINVAL;
+		}
+		if (sess != ctx) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
+
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
 
-/** Handle Single-Pass AES-GMAC on QAT GEN3 */
-static inline void
-handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
-		struct qat_sym_op_cookie *cookie,
-		struct icp_qat_fw_la_bulk_req *qat_req)
-{
-	static const uint32_t ver_key_offset =
-			sizeof(struct icp_qat_hw_auth_setup) +
-			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
-			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
-			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
-			sizeof(struct icp_qat_hw_cipher_config);
-	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
-			(void *) &qat_req->cd_ctrl;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-			(void *) &qat_req->serv_specif_rqpars;
-	uint32_t data_length = op->sym->auth.data.length;
-
-	/* Fill separate Content Descriptor for this op */
-	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
-			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-				ctx->cd.cipher.key :
-				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
-			ctx->auth_key_length);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-				ICP_QAT_HW_CIPHER_AEAD_MODE,
-				ctx->qat_cipher_alg,
-				ICP_QAT_HW_CIPHER_NO_CONVERT,
-				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
-					ICP_QAT_HW_CIPHER_ENCRYPT :
-					ICP_QAT_HW_CIPHER_DECRYPT));
-	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
-			ctx->digest_length,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
-			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(data_length);
-
-	/* Update the request */
-	qat_req->cd_pars.u.s.content_desc_addr =
-			cookie->opt.spc_gmac.cd_phys_addr;
-	qat_req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
-			sizeof(struct icp_qat_hw_cipher_config) +
-			ctx->auth_key_length, 8) >> 3;
-	qat_req->comn_mid.src_length = data_length;
-	qat_req->comn_mid.dst_length = 0;
-
-	cipher_param->spc_aad_addr = 0;
-	cipher_param->spc_auth_res_addr = op->sym->auth.digest.phys_addr;
-	cipher_param->spc_aad_sz = data_length;
-	cipher_param->reserved = 0;
-	cipher_param->spc_auth_res_sz = ctx->digest_length;
-
-	qat_req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
-	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-	ICP_QAT_FW_LA_PROTO_SET(
-			qat_req->comn_hdr.serv_specif_flags,
-			ICP_QAT_FW_LA_NO_PROTO);
-}
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
-{
-	int ret = 0;
-	struct qat_sym_session *ctx = NULL;
-	struct icp_qat_fw_la_cipher_req_params *cipher_param;
-	struct icp_qat_fw_la_cipher_20_req_params *cipher_param20;
-	struct icp_qat_fw_la_auth_req_params *auth_param;
-	register struct icp_qat_fw_la_bulk_req *qat_req;
-	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
-	uint32_t cipher_len = 0, cipher_ofs = 0;
-	uint32_t auth_len = 0, auth_ofs = 0;
-	uint32_t min_ofs = 0;
-	uint64_t src_buf_start = 0, dst_buf_start = 0;
-	uint64_t auth_data_end = 0;
-	uint8_t do_sgl = 0;
-	uint8_t in_place = 1;
-	int alignment_adjustment = 0;
-	int oop_shift = 0;
-	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
-	struct qat_sym_op_cookie *cookie =
-				(struct qat_sym_op_cookie *)op_cookie;
-
-	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
-				"operation requests, op (%p) is not a "
-				"symmetric operation.", op);
-		return -EINVAL;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)ctx;
+			opaque[1] = (uintptr_t)build_request;
+		}
 	}
 
-	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
-		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
-				" requests, op (%p) is sessionless.", op);
-		return -EINVAL;
-	} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		ctx = (struct qat_sym_session *)get_sym_session_private_data(
-				op->sym->session, qat_sym_driver_id);
 #ifdef RTE_LIB_SECURITY
-	} else {
-		ctx = (struct qat_sym_session *)get_sec_session_private_data(
-				op->sym->sec_session);
-		if (likely(ctx)) {
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if (sess != (void *)op->sym->sec_session) {
+			struct rte_cryptodev *cdev;
+			struct qat_cryptodev_private *internals;
+			enum rte_proc_type_t proc_type;
+
+			ctx = get_sec_session_private_data(
+					op->sym->sec_session);
+			if (unlikely(!ctx)) {
+				QAT_DP_LOG(ERR, "No session for this device");
+				return -EINVAL;
+			}
 			if (unlikely(ctx->bpi_ctx == NULL)) {
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
@@ -207,463 +127,234 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
-		}
-#endif
-	}
+			cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+			internals = cdev->data->dev_private;
+			proc_type = rte_eal_process_type();
 
-	if (unlikely(ctx == NULL)) {
-		QAT_DP_LOG(ERR, "Session was not created for this device");
-		return -EINVAL;
-	}
+			if (internals->qat_dev->qat_dev_gen != dev_gen) {
+				op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+				return -EINVAL;
+			}
+
+			if (unlikely(ctx->build_request[proc_type] == NULL)) {
+				int ret =
+				qat_sym_gen_dev_ops[dev_gen].set_session(
+					(void *)cdev, sess);
+				if (ret < 0) {
+					op->status =
+					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+					return -EINVAL;
+				}
+			}
 
-	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
-	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
-	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
-	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	cipher_param20 = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param +
-			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-
-	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
-			!ctx->is_gmac) {
-		/* AES-GCM or AES-CCM */
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
-			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
-			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
-			&& ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
-			do_aead = 1;
-		} else {
-			do_auth = 1;
-			do_cipher = 1;
+			sess = (void *)op->sym->sec_session;
+			build_request = ctx->build_request[proc_type];
+			opaque[0] = (uintptr_t)sess;
+			opaque[1] = (uintptr_t)build_request;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
-		do_auth = 1;
-		do_cipher = 0;
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		do_auth = 0;
-		do_cipher = 1;
+	}
+#endif
+	else { /* RTE_CRYPTO_OP_SESSIONLESS */
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		QAT_LOG(DEBUG, "QAT does not support sessionless operation");
+		return -1;
 	}
 
-	if (do_cipher) {
+	return build_request(op, (void *)ctx, out_msg, op_cookie);
+}
 
-		if (ctx->qat_cipher_alg ==
-					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
-			ctx->qat_cipher_alg ==
-				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+uint16_t
+qat_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_sym_build_request,
+			(void **)ops, nb_ops);
+}
 
-			if (unlikely(
-			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
-			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			cipher_len = op->sym->cipher.data.length >> 3;
-			cipher_ofs = op->sym->cipher.data.offset >> 3;
-
-		} else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to device.
-			 * Process any partial block using CFB mode.
-			 * Even if 0 complete blocks, still send this to device
-			 * to get into rx queue for post-process and dequeuing
-			 */
-			cipher_len = qat_bpicipher_preprocess(ctx, op);
-			cipher_ofs = op->sym->cipher.data.offset;
-		} else {
-			cipher_len = op->sym->cipher.data.length;
-			cipher_ofs = op->sym->cipher.data.offset;
-		}
+uint16_t
+qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops,
+				qat_sym_process_response, nb_ops);
+}
 
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
-		min_ofs = cipher_ofs;
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
+{
+	int i = 0, ret = 0;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_cryptodev_private *internals;
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	uint64_t capa_size;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support symmetric crypto",
+				name);
+		return -(EFAULT);
 	}
 
-	if (do_auth) {
-
-		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
-			ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
-			if (unlikely(
-			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
-			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
-				QAT_DP_LOG(ERR,
-		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
-				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-				return -EINVAL;
-			}
-			auth_ofs = op->sym->auth.data.offset >> 3;
-			auth_len = op->sym->auth.data.length >> 3;
-
-			auth_param->u1.aad_adr =
-					rte_crypto_op_ctophys_offset(op,
-							ctx->auth_iv.offset);
-
-		} else if (ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			/* AES-GMAC */
-			set_cipher_iv(ctx->auth_iv.length,
-				ctx->auth_iv.offset,
-				cipher_param, op, qat_req);
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
-			auth_param->u1.aad_adr = 0;
-			auth_param->u2.aad_sz = 0;
-
-		} else {
-			auth_ofs = op->sym->auth.data.offset;
-			auth_len = op->sym->auth.data.length;
-
+	/*
+	 * All processes must use same driver id so they can share sessions.
+	 * Store driver_id so we can validate that all processes have the same
+	 * value, typically they have, but could differ if binaries built
+	 * separately.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_sym_driver_id =
+				qat_sym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_sym_driver_id !=
+				qat_sym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s has a different driver id than the corresponding device in the primary process",
+				name);
+			return -(EFAULT);
 		}
-		min_ofs = auth_ofs;
-
-		if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL ||
-				ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY)
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-
 	}
 
-	if (do_aead) {
-		/*
-		 * This address may used for setting AAD physical pointer
-		 * into IV offset from op
-		 */
-		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
-		if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg ==
-					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-
-			set_cipher_iv(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, qat_req);
-
-		} else if (ctx->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In case of AES-CCM this may point to user selected
-			 * memory or iv offset in crypto_op
-			 */
-			uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is true AAD length, it not includes 18 bytes of
-			 * preceding data
-			 */
-			uint8_t aad_ccm_real_len = 0;
-			uint8_t aad_len_field_sz = 0;
-			uint32_t msg_len_be =
-					rte_bswap32(op->sym->aead.data.length);
-
-			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-				aad_ccm_real_len = ctx->aad_len -
-					ICP_QAT_HW_CCM_AAD_B0_LEN -
-					ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			} else {
-				/*
-				 * aad_len not greater than 18, so no actual aad
-				 *  data, then use IV after op for B0 block
-				 */
-				aad_data = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-				aad_phys_addr_aead =
-						rte_crypto_op_ctophys_offset(op,
-							ctx->cipher_iv.offset);
-			}
-
-			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
-							ctx->cipher_iv.length;
-
-			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-							aad_len_field_sz,
-							ctx->digest_length, q);
-
-			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET +
-				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				    (uint8_t *)&msg_len_be,
-				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-			} else {
-				memcpy(aad_data	+ ctx->cipher_iv.length +
-				    ICP_QAT_HW_CCM_NONCE_OFFSET,
-				    (uint8_t *)&msg_len_be
-				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				    - q), q);
-			}
-
-			if (aad_len_field_sz > 0) {
-				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
-						= rte_bswap16(aad_ccm_real_len);
-
-				if ((aad_ccm_real_len + aad_len_field_sz)
-						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-					uint8_t pad_len = 0;
-					uint8_t pad_idx = 0;
-
-					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len + aad_len_field_sz) %
-						ICP_QAT_HW_CCM_AAD_B0_LEN);
-					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					    aad_ccm_real_len + aad_len_field_sz;
-					memset(&aad_data[pad_idx],
-							0, pad_len);
-				}
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_dev_instance->sym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->sym_rte_dev.devargs = NULL;
 
-			}
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->sym_rte_dev), &init_params);
 
-			set_cipher_iv_ccm(ctx->cipher_iv.length,
-					ctx->cipher_iv.offset,
-					cipher_param, op, q,
-					aad_len_field_sz);
+	if (cryptodev == NULL)
+		return -ENODEV;
 
-		}
+	qat_dev_instance->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_sym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
-		cipher_len = op->sym->aead.data.length;
-		cipher_ofs = op->sym->aead.data.offset;
-		auth_len = op->sym->aead.data.length;
-		auth_ofs = op->sym->aead.data.offset;
+	cryptodev->enqueue_burst = qat_sym_enqueue_burst;
+	cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
-		auth_param->u1.aad_adr = aad_phys_addr_aead;
-		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		min_ofs = op->sym->aead.data.offset;
-	}
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
-	if (op->sym->m_src->nb_segs > 1 ||
-			(op->sym->m_dst && op->sym->m_dst->nb_segs > 1))
-		do_sgl = 1;
-
-	/* adjust for chain case */
-	if (do_cipher && do_auth)
-		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
-	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
-		min_ofs = 0;
-
-	if (unlikely((op->sym->m_dst != NULL) &&
-			(op->sym->m_dst != op->sym->m_src))) {
-		/* Out-of-place operation (OOP)
-		 * Don't align DMA start. DMA the minimum data-set
-		 * so as not to overwrite data in dest buffer
-		 */
-		in_place = 0;
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-		dst_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-		oop_shift = min_ofs;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	} else {
-		/* In-place operation
-		 * Start DMA at nearest aligned address below min_ofs
-		 */
-		src_buf_start =
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
-						& QAT_64_BTYE_ALIGN_MASK;
-
-		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
-					rte_pktmbuf_headroom(op->sym->m_src))
-							> src_buf_start)) {
-			/* alignment has pushed addr ahead of start of mbuf
-			 * so revert and take the performance hit
-			 */
-			src_buf_start =
-				rte_pktmbuf_iova_offset(op->sym->m_src,
-								min_ofs);
+#ifdef RTE_LIB_SECURITY
+	if (gen_dev_ops->create_security_ctx) {
+		cryptodev->security_ctx =
+			gen_dev_ops->create_security_ctx((void *)cryptodev);
+		if (cryptodev->security_ctx == NULL) {
+			QAT_LOG(ERR, "rte_security_ctx memory alloc failed");
+			ret = -ENOMEM;
+			goto error;
 		}
-		dst_buf_start = src_buf_start;
 
-		/* remember any adjustment for later, note, can be +/- */
-		alignment_adjustment = src_buf_start -
-			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
-	}
-
-	if (do_cipher || do_aead) {
-		cipher_param->cipher_offset =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, cipher_ofs) - src_buf_start;
-		cipher_param->cipher_length = cipher_len;
+		cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+		QAT_LOG(INFO, "Device %s rte_security support enabled", name);
 	} else {
-		cipher_param->cipher_offset = 0;
-		cipher_param->cipher_length = 0;
+		QAT_LOG(INFO, "Device %s rte_security support disabled", name);
 	}
-
-	if (!ctx->is_single_pass) {
-		/* Do not let to overwrite spc_aad len */
-		if (do_auth || do_aead) {
-			auth_param->auth_off =
-				(uint32_t)rte_pktmbuf_iova_offset(
-				op->sym->m_src, auth_ofs) - src_buf_start;
-			auth_param->auth_len = auth_len;
-		} else {
-			auth_param->auth_off = 0;
-			auth_param->auth_len = 0;
+#endif
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_SYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			ret = -EFAULT;
+			goto error;
 		}
 	}
 
-	qat_req->comn_mid.dst_length =
-		qat_req->comn_mid.src_length =
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		> (auth_param->auth_off + auth_param->auth_len) ?
-		(cipher_param->cipher_offset + cipher_param->cipher_length)
-		: (auth_param->auth_off + auth_param->auth_len);
-
-	if (do_auth && do_cipher) {
-		/* Handle digest-encrypted cases, i.e.
-		 * auth-gen-then-cipher-encrypt and
-		 * cipher-decrypt-then-auth-verify
-		 */
-		 /* First find the end of the data */
-		if (do_sgl) {
-			uint32_t remaining_off = auth_param->auth_off +
-				auth_param->auth_len + alignment_adjustment + oop_shift;
-			struct rte_mbuf *sgl_buf =
-				(in_place ?
-					op->sym->m_src : op->sym->m_dst);
-
-			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
-					&& sgl_buf->next != NULL) {
-				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
-				sgl_buf = sgl_buf->next;
-			}
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
 
-			auth_data_end = (uint64_t)rte_pktmbuf_iova_offset(
-				sgl_buf, remaining_off);
-		} else {
-			auth_data_end = (in_place ?
-				src_buf_start : dst_buf_start) +
-				auth_param->auth_off + auth_param->auth_len;
-		}
-		/* Then check if digest-encrypted conditions are met */
-		if ((auth_param->auth_off + auth_param->auth_len <
-					cipher_param->cipher_offset +
-					cipher_param->cipher_length) &&
-				(op->sym->auth.digest.phys_addr ==
-					auth_data_end)) {
-			/* Handle partial digest encryption */
-			if (cipher_param->cipher_offset +
-					cipher_param->cipher_length <
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length)
-				qat_req->comn_mid.dst_length =
-					qat_req->comn_mid.src_length =
-					auth_param->auth_off +
-					auth_param->auth_len +
-					ctx->digest_length;
-			struct icp_qat_fw_comn_req_hdr *header =
-				&qat_req->comn_hdr;
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-		}
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
 	}
 
-	if (do_sgl) {
-
-		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
-				QAT_COMN_PTR_TYPE_SGL);
-		ret = qat_sgl_fill_array(op->sym->m_src,
-		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
-		   &cookie->qat_sgl_src,
-		   qat_req->comn_mid.src_length,
-		   QAT_SYM_SGL_MAX_NUMBER);
-
-		if (unlikely(ret)) {
-			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
-			return ret;
-		}
+	internals->service_type = QAT_SERVICE_SYMMETRIC;
+	qat_pci_dev->sym_dev = internals;
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
 
-		if (in_place)
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-		else {
-			ret = qat_sgl_fill_array(op->sym->m_dst,
-				(int64_t)(dst_buf_start -
-					  rte_pktmbuf_iova(op->sym->m_dst)),
-				 &cookie->qat_sgl_dst,
-				 qat_req->comn_mid.dst_length,
-				 QAT_SYM_SGL_MAX_NUMBER);
-
-			if (unlikely(ret)) {
-				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
-				return ret;
-			}
+	return 0;
 
-			qat_req->comn_mid.src_data_addr =
-				cookie->qat_sgl_src_phys_addr;
-			qat_req->comn_mid.dest_data_addr =
-					cookie->qat_sgl_dst_phys_addr;
-		}
-		qat_req->comn_mid.src_length = 0;
-		qat_req->comn_mid.dst_length = 0;
-	} else {
-		qat_req->comn_mid.src_data_addr = src_buf_start;
-		qat_req->comn_mid.dest_data_addr = dst_buf_start;
-	}
+error:
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	memset(&qat_dev_instance->sym_rte_dev, 0,
+		sizeof(qat_dev_instance->sym_rte_dev));
 
-	if (ctx->is_single_pass) {
-		if (ctx->is_ucs) {
-			/* GEN 4 */
-			cipher_param20->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param20->spc_auth_res_addr =
-				op->sym->aead.digest.phys_addr;
-		} else {
-			cipher_param->spc_aad_addr =
-				op->sym->aead.aad.phys_addr;
-			cipher_param->spc_auth_res_addr =
-					op->sym->aead.digest.phys_addr;
-		}
-	} else if (ctx->is_single_pass_gmac &&
-		       op->sym->auth.data.length <= QAT_AES_GMAC_SPC_MAX_SIZE) {
-		/* Handle Single-Pass AES-GMAC */
-		handle_spc_gmac(ctx, op, cookie, qat_req);
-	}
+	return ret;
+}
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
-			sizeof(struct icp_qat_fw_la_bulk_req));
-	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
-			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
-			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher) {
-		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
-						uint8_t *,
-						ctx->cipher_iv.offset);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
-				ctx->cipher_iv.length);
-	}
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
 
-	if (do_auth) {
-		if (ctx->auth_iv.length) {
-			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
-							uint8_t *,
-							ctx->auth_iv.offset);
-			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
-						ctx->auth_iv.length);
-		}
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
-				ctx->digest_length);
-	}
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->sym_dev->capa_mz);
 
-	if (do_aead) {
-		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
-				ctx->digest_length);
-		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
-				ctx->aad_len);
-	}
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->dev_id);
+#ifdef RTE_LIB_SECURITY
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
 #endif
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
 	return 0;
 }
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		qat_sym_driver_id);
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f4ff2ce4cd..074612c11b 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -131,11 +131,6 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops);
 
-int
-qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen);
-
-
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
  *  Uses ECB+XOR to do CFB encryption, same result, more performant
-- 
2.17.1



Thread overview: 156+ messages
2021-10-26 17:25 [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 1/7] crypro/qat: qat driver refactor skeleton Kai Ji
2021-10-29 13:58   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 2/7] crypto/qat: qat driver sym op refactor Kai Ji
2021-10-29 14:26   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 3/7] crypto/qat: qat driver asym " Kai Ji
2021-10-29 14:36   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 4/7] crypto/qat: qat driver session method rework Kai Ji
2021-10-29 14:40   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 5/7] crypto/qat: qat driver datapath rework Kai Ji
2021-10-29 14:41   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 6/7] app/test: cryptodev test fix Kai Ji
2021-10-29 14:43   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 7/7] crypto/qat: qat driver rework clean up Kai Ji
2021-10-29 14:46   ` Zhang, Roy Fan
2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 1/7] crypro/qat: qat driver refactor skeleton Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 2/7] crypto/qat: qat driver sym op refactor Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 3/7] crypto/qat: qat driver asym " Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 4/7] crypto/qat: qat driver session method rework Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 5/7] crypto/qat: qat driver datapath rework Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 6/7] app/test: cryptodev test fix Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 7/7] crypto/qat: qat driver rework clean up Kai Ji
2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 1/8] crypro/qat: qat driver refactor skeleton Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 2/8] crypto/qat: qat driver sym op refactor Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 3/8] crypto/qat: qat driver asym " Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 4/8] crypto/qat: qat driver session method rework Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 5/8] crypto/qat: qat driver datapath rework Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 6/8] crypto/qat: support sgl oop operation Kai Ji
2021-11-03 15:46       ` Zhang, Roy Fan
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 7/8] app/test: cryptodev test fix Kai Ji
2021-11-03 15:45       ` Zhang, Roy Fan
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 8/8] crypto/qat: qat driver rework clean up Kai Ji
2021-11-03 15:46     ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Zhang, Roy Fan
2021-11-03 18:49     ` [dpdk-dev] [EXT] " Akhil Goyal
2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 01/11] common/qat: define build op request and dequeue op Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 02/11] crypto/qat: sym build op request specific implementation Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 03/11] crypto/qat: rework session APIs Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 04/11] crypto/qat: asym build op request specific implementation Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 05/11] crypto/qat: unify sym pmd apis Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 06/11] crypto/qat: unify qat asym " Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 07/11] crypto/qat: op burst data path rework Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 08/11] compress/qat: comp dequeue burst update Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 09/11] crypto/qat: raw dp api integration Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 10/11] crypto/qat: support out of place SG list Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 11/11] test/cryptodev: fix incomplete data length Kai Ji
2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 04/10] crypto/qat: rework session APIs Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 07/10] crypto/qat: unify qat asym " Kai Ji
2022-01-28 18:23           ` Kai Ji [this message]
2022-01-28 18:23           ` [dpdk-dev v5 09/10] crypto/qat: raw dp api integration Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 04/10] crypto/qat: rework session APIs Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 07/10] crypto/qat: unify qat asym " Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 08/10] crypto/qat: op burst data path rework Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 09/10] crypto/qat: raw dp api integration Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-08 18:14                 ` [dpdk-dev v7 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-02-09 10:22                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-02-09 10:22                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-02-09 10:23                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 04/10] crypto/qat: rework session APIs Kai Ji
2022-02-09 10:23                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-02-09 10:23                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-02-09 10:24                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 07/10] crypto/qat: unify qat asym " Kai Ji
2022-02-09 10:24                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 08/10] crypto/qat: op burst data path rework Kai Ji
2022-02-09 10:24                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 09/10] crypto/qat: raw dp api integration Kai Ji
2022-02-09 10:25                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-09 10:25                   ` Zhang, Roy Fan
2022-02-09 10:20                 ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Zhang, Roy Fan
2022-02-12 11:32                   ` Akhil Goyal
2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 04/10] crypto/qat: rework session APIs Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 07/10] crypto/qat: unify qat asym " Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 08/10] crypto/qat: op burst data path rework Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 09/10] crypto/qat: raw dp api integration Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-17 17:59                   ` [EXT] [dpdk-dev v8 00/10] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 1/9] common/qat: define build request and dequeue ops Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 2/9] crypto/qat: support symmetric build op request Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 3/9] crypto/qat: rework session functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 5/9] crypto/qat: unify symmetric functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 6/9] crypto/qat: unify asymmetric functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 7/9] crypto/qat: rework burst data path Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 8/9] crypto/qat: unify raw data path functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 9/9] crypto/qat: support out of place SG list Kai Ji
2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-22 17:02                       ` [dpdk-dev v10 1/9] common/qat: define build request and dequeue ops Kai Ji
2022-02-22 18:52                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 2/9] crypto/qat: support symmetric build op request Kai Ji
2022-02-22 18:52                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 3/9] crypto/qat: rework session functions Kai Ji
2022-02-22 18:53                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
2022-02-22 18:53                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 5/9] crypto/qat: unify symmetric functions Kai Ji
2022-02-22 18:53                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 6/9] crypto/qat: unify asymmetric functions Kai Ji
2022-02-22 18:52                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 7/9] crypto/qat: rework burst data path Kai Ji
2022-02-22 18:54                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 8/9] crypto/qat: unify raw data path functions Kai Ji
2022-02-22 18:54                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 9/9] crypto/qat: support out of place SG list Kai Ji
2022-02-22 18:55                         ` Zhang, Roy Fan
2022-02-22 18:23                       ` [EXT] [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 1/9] common/qat: define build request and dequeue ops Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 2/9] crypto/qat: support symmetric build op request Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 3/9] crypto/qat: rework session functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 4/9] crypto/qat: rework asymmetric op build operation Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 5/9] crypto/qat: unify symmetric functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 6/9] crypto/qat: unify asymmetric functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 7/9] crypto/qat: rework burst data path Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 8/9] crypto/qat: unify raw data path functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 9/9] crypto/qat: support out of place SG list Fan Zhang
2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-23  0:49                           ` [dpdk-dev v12 1/9] common/qat: define build request and dequeue ops Kai Ji
2022-02-23  0:49                           ` [dpdk-dev v12 2/9] crypto/qat: support symmetric build op request Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 3/9] crypto/qat: rework session functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 5/9] crypto/qat: unify symmetric functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 6/9] crypto/qat: unify asymmetric functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 7/9] crypto/qat: rework burst data path Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 8/9] crypto/qat: unify raw data path functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 9/9] crypto/qat: support out of place SG list Kai Ji
2022-02-23  9:18                           ` [EXT] [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
