From: Kai Ji <kai.ji@intel.com>
To: dev@dpdk.org
Cc: Kai Ji <kai.ji@intel.com>
Subject: [dpdk-dev] [dpdk-dev v1 4/7] crypto/qat: qat driver session method rework
Date: Tue, 26 Oct 2021 18:25:15 +0100
Message-ID: <20211026172518.20183-5-kai.ji@intel.com>
In-Reply-To: <20211026172518.20183-1-kai.ji@intel.com>

This patch introduces set_session and set_raw_dp_ctx methods in the QAT
generation-specific device ops, so each device generation provides its
own session-setup and raw data-path setup routines.
It also replaces min_qat_dev_gen with dev_id: a session is bound to the
device it was created on and is treated as invalid if the device
generation does not match during init and subsequent ops.
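
Concretely, session setup now ends with a single indirect call through
the generation ops table. A minimal sketch, lifted from the
qat_sym_session.c hunk below (error handling elided):

	enum qat_device_gen qat_dev_gen =
			internals->qat_dev->qat_dev_gen;

	qat_sym_session_finalize(session);
	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
			(void *)session);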

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
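Reviewer note: with min_qat_dev_gen removed, session validity becomes a
per-device property keyed on the stored dev_id. A hypothetical sketch of
what a data-path check could look like (illustrative only, not part of
this patch; where the current device id comes from is an assumption):

	/* Illustrative only: reject a session created on another device,
	 * standing in for the removed min_qat_dev_gen comparison.
	 */
	if (unlikely(ctx->dev_id != dev_id)) {
		QAT_DP_LOG(ERR, "Session not valid on this device");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -EINVAL;
	}
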
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  90 ++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 467 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 243 ++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |   7 +-
 drivers/crypto/qat/qat_crypto.c              |   1 +
 drivers/crypto/qat/qat_crypto.h              |   9 +
 drivers/crypto/qat/qat_sym.c                 |   8 +-
 drivers/crypto/qat/qat_sym_session.c         | 106 +----
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 9 files changed, 832 insertions(+), 101 deletions(-)
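
Each generation installs the new hooks from its RTE_INIT constructor;
condensed here from the GEN1 registration added by this patch (other
fields elided):

	RTE_INIT(qat_sym_crypto_gen1_init)
	{
		qat_sym_gen_dev_ops[QAT_GEN1].set_session =
				qat_sym_crypto_set_session_gen1;
		qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
				qat_sym_configure_raw_dp_ctx_gen1;
	}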

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..72609703f9 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -166,6 +166,90 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret != -ENOTSUP)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algorithms;
+	 * GEN2 supports only a subset, so check the capability here
+	 */
+	if ((qat_private->internal_capabilities &
+			QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+		return -ENOTSUP;
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +288,10 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +309,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..6494019050 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -143,6 +143,468 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as a
+		 * cipher operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret != -ENOTSUP)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algorithms;
+	 * GEN3 supports them, so set the extended hash flags here
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
+static int
+qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen3(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen3(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv __rte_unused,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, digest, auth_iv, ofs,
+			(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
+			&vec->auth_iv[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen3(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen3;
+	} else if (ctx->is_single_pass_gmac) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen3;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen3;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +612,10 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -161,4 +627,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..167c95abcf 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -103,11 +103,253 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+	if (ret != -ENOTSUP)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP as it cannot handle some mixed algorithms;
+	 * GEN4 supports them, so set the extended hash flags here
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
+static int
+qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_op_cookie *cookie;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	cookie = qp->op_cookies[tail >> tx_queue->trailz];
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
+			data, n_data_vecs, NULL, 0);
+	if (unlikely(data_len < 0))
+		return -1;
+
+	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
+			NULL, aad, digest);
+#endif
+	return 0;
+}
+
+static uint32_t
+qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		struct qat_sym_op_cookie *cookie =
+			qp->op_cookies[tail >> tx_queue->trailz];
+
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
+			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (unlikely(data_len < 0))
+			break;
+
+		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
+				&vec->digest[i], &vec->aad[i], ofs,
+				(uint32_t)data_len);
+
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, &vec->iv[i], NULL,
+				&vec->aad[i], &vec->digest[i]);
+#endif
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
+static int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
+{
+	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
+	struct qat_sym_session *ctx = _ctx;
+	int ret;
+
+	ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
+	if (ret < 0)
+		return ret;
+
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
+		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -121,4 +363,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index e1fd14956b..e32bdf1c4b 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -6,6 +6,7 @@
 #ifdef RTE_LIB_SECURITY
 #include <rte_security_driver.h>
 #endif
+#include <cryptodev_pmd.h>
 
 #include "adf_transport_access_macros.h"
 #include "icp_qat_fw.h"
@@ -154,7 +155,7 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
 };
 
 static struct qat_capabilities_info
-qat_sym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev __rte_unused)
+qat_sym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev __rte_unused)
 {
 	struct qat_capabilities_info capa_info;
 	capa_info.data = qat_sym_crypto_caps_gen1;
@@ -1169,6 +1170,10 @@ RTE_INIT(qat_sym_crypto_gen1_init)
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
+			qat_sym_configure_raw_dp_ctx_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 01d2439b93..6f17abd404 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2021 Intel Corporation
  */
 
+#include <cryptodev_pmd.h>
 #include "qat_device.h"
 #include "qat_qp.h"
 #include "qat_crypto.h"
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..8fe3f0b061 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -48,15 +48,24 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
+typedef int (*set_raw_dp_ctx_t)(void *raw_dp_ctx, void *ctx);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
+	set_raw_dp_ctx_t set_raw_dp_ctx;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index a92874cd27..de687de857 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -237,7 +237,7 @@ refactor_qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -302,12 +302,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..52837d7c9c 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,7 +598,6 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
 	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
@@ -1205,9 +1124,10 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in,
+		uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1574,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1921,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2181,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2205,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2231,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 73493ec864..e1b5edb2f9 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -100,7 +100,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


Thread overview: 156+ messages
2021-10-26 17:25 [dpdk-dev] [dpdk-dev v1 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 1/7] crypro/qat: qat driver refactor skeleton Kai Ji
2021-10-29 13:58   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 2/7] crypto/qat: qat driver sym op refactor Kai Ji
2021-10-29 14:26   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 3/7] crypto/qat: qat driver asym " Kai Ji
2021-10-29 14:36   ` Zhang, Roy Fan
2021-10-26 17:25 ` Kai Ji [this message]
2021-10-29 14:40   ` [dpdk-dev] [dpdk-dev v1 4/7] crypto/qat: qat driver session method rework Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 5/7] crypto/qat: qat driver datapath rework Kai Ji
2021-10-29 14:41   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 6/7] app/test: cryptodev test fix Kai Ji
2021-10-29 14:43   ` Zhang, Roy Fan
2021-10-26 17:25 ` [dpdk-dev] [dpdk-dev v1 7/7] crypto/qat: qat driver rework clean up Kai Ji
2021-10-29 14:46   ` Zhang, Roy Fan
2021-11-01 23:12 ` [dpdk-dev] [dpdk-dev v2 0/7] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 1/7] crypro/qat: qat driver refactor skeleton Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 2/7] crypto/qat: qat driver sym op refactor Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 3/7] crypto/qat: qat driver asym " Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 4/7] crypto/qat: qat driver session method rework Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 5/7] crypto/qat: qat driver datapath rework Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 6/7] app/test: cryptodev test fix Kai Ji
2021-11-01 23:12   ` [dpdk-dev] [dpdk-dev v2 7/7] crypto/qat: qat driver rework clean up Kai Ji
2021-11-02 13:49   ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 1/8] crypro/qat: qat driver refactor skeleton Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 2/8] crypto/qat: qat driver sym op refactor Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 3/8] crypto/qat: qat driver asym " Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 4/8] crypto/qat: qat driver session method rework Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 5/8] crypto/qat: qat driver datapath rework Kai Ji
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 6/8] crypto/qat: support sgl oop operation Kai Ji
2021-11-03 15:46       ` Zhang, Roy Fan
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 7/8] app/test: cryptodev test fix Kai Ji
2021-11-03 15:45       ` Zhang, Roy Fan
2021-11-02 13:49     ` [dpdk-dev] [dpdk-dev v3 8/8] crypto/qat: qat driver rework clean up Kai Ji
2021-11-03 15:46     ` [dpdk-dev] [dpdk-dev v3 0/8] drivers/qat: QAT symmetric crypto datapatch rework Zhang, Roy Fan
2021-11-03 18:49     ` [dpdk-dev] [EXT] " Akhil Goyal
2021-11-05  0:19     ` [dpdk-dev] [dpdk-dev v4 00/11] " Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 01/11] common/qat: define build op request and dequeue op Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 02/11] crypto/qat: sym build op request specific implementation Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 03/11] crypto/qat: rework session APIs Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 04/11] crypto/qat: asym build op request specific implementation Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 05/11] crypto/qat: unify sym pmd apis Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 06/11] crypto/qat: unify qat asym " Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 07/11] crypto/qat: op burst data path rework Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 08/11] compress/qat: comp dequeue burst update Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 09/11] crypto/qat: raw dp api integration Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 10/11] crypto/qat: support out of place SG list Kai Ji
2021-11-05  0:19       ` [dpdk-dev] [dpdk-dev v4 11/11] test/cryptodev: fix incomplete data length Kai Ji
2022-01-28 18:23         ` [dpdk-dev v5 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 04/10] crypto/qat: rework session APIs Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 07/10] crypto/qat: unify qat asym " Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 08/10] crypto/qat: op burst data path rework Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 09/10] crypto/qat: raw dp api integration Kai Ji
2022-01-28 18:23           ` [dpdk-dev v5 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-04 18:50           ` [dpdk-dev v6 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 04/10] crypto/qat: rework session APIs Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 07/10] crypto/qat: unify qat asym " Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 08/10] crypto/qat: op burst data path rework Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 09/10] crypto/qat: raw dp api integration Kai Ji
2022-02-04 18:50             ` [dpdk-dev v6 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-08 18:14               ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-08 18:14                 ` [dpdk-dev v7 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-02-09 10:22                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-02-09 10:22                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-02-09 10:23                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 04/10] crypto/qat: rework session APIs Kai Ji
2022-02-09 10:23                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-02-09 10:23                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-02-09 10:24                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 07/10] crypto/qat: unify qat asym " Kai Ji
2022-02-09 10:24                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 08/10] crypto/qat: op burst data path rework Kai Ji
2022-02-09 10:24                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 09/10] crypto/qat: raw dp api integration Kai Ji
2022-02-09 10:25                   ` Zhang, Roy Fan
2022-02-08 18:14                 ` [dpdk-dev v7 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-09 10:25                   ` Zhang, Roy Fan
2022-02-09 10:20                 ` [dpdk-dev v7 00/10] drivers/qat: QAT symmetric crypto datapatch rework Zhang, Roy Fan
2022-02-12 11:32                   ` Akhil Goyal
2022-02-17 16:28                 ` [dpdk-dev v8 " Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 01/10] common/qat: define build op request and dequeue op Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 02/10] crypto/qat: sym build op request specific implementation Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 03/10] crypto/qat: qat generation specific enqueue Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 04/10] crypto/qat: rework session APIs Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 05/10] crypto/qat: rework asymmetric crypto build operation Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 06/10] crypto/qat: unify qat sym pmd apis Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 07/10] crypto/qat: unify qat asym " Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 08/10] crypto/qat: op burst data path rework Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 09/10] crypto/qat: raw dp api integration Kai Ji
2022-02-17 16:29                   ` [dpdk-dev v8 10/10] crypto/qat: support out of place SG list Kai Ji
2022-02-17 17:59                   ` [EXT] [dpdk-dev v8 00/10] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
2022-02-18 17:15                   ` [dpdk-dev v9 0/9] " Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 1/9] common/qat: define build request and dequeue ops Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 2/9] crypto/qat: support symmetric build op request Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 3/9] crypto/qat: rework session functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 5/9] crypto/qat: unify symmetric functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 6/9] crypto/qat: unify asymmetric functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 7/9] crypto/qat: rework burst data path Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 8/9] crypto/qat: unify raw data path functions Kai Ji
2022-02-18 17:15                     ` [dpdk-dev v9 9/9] crypto/qat: support out of place SG list Kai Ji
2022-02-22 17:02                     ` [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-22 17:02                       ` [dpdk-dev v10 1/9] common/qat: define build request and dequeue ops Kai Ji
2022-02-22 18:52                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 2/9] crypto/qat: support symmetric build op request Kai Ji
2022-02-22 18:52                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 3/9] crypto/qat: rework session functions Kai Ji
2022-02-22 18:53                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
2022-02-22 18:53                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 5/9] crypto/qat: unify symmetric functions Kai Ji
2022-02-22 18:53                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 6/9] crypto/qat: unify asymmetric functions Kai Ji
2022-02-22 18:52                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 7/9] crypto/qat: rework burst data path Kai Ji
2022-02-22 18:54                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 8/9] crypto/qat: unify raw data path functions Kai Ji
2022-02-22 18:54                         ` Zhang, Roy Fan
2022-02-22 17:02                       ` [dpdk-dev v10 9/9] crypto/qat: support out of place SG list Kai Ji
2022-02-22 18:55                         ` Zhang, Roy Fan
2022-02-22 18:23                       ` [EXT] [dpdk-dev v10 0/9] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
2022-02-22 20:30                       ` [PATCH v11 " Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 1/9] common/qat: define build request and dequeue ops Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 2/9] crypto/qat: support symmetric build op request Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 3/9] crypto/qat: rework session functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 4/9] crypto/qat: rework asymmetric op build operation Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 5/9] crypto/qat: unify symmetric functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 6/9] crypto/qat: unify asymmetric functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 7/9] crypto/qat: rework burst data path Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 8/9] crypto/qat: unify raw data path functions Fan Zhang
2022-02-22 20:30                         ` [PATCH v11 9/9] crypto/qat: support out of place SG list Fan Zhang
2022-02-23  0:49                         ` [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Kai Ji
2022-02-23  0:49                           ` [dpdk-dev v12 1/9] common/qat: define build request and dequeue ops Kai Ji
2022-02-23  0:49                           ` [dpdk-dev v12 2/9] crypto/qat: support symmetric build op request Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 3/9] crypto/qat: rework session functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 4/9] crypto/qat: rework asymmetric op build operation Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 5/9] crypto/qat: unify symmetric functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 6/9] crypto/qat: unify asymmetric functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 7/9] crypto/qat: rework burst data path Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 8/9] crypto/qat: unify raw data path functions Kai Ji
2022-02-23  0:50                           ` [dpdk-dev v12 9/9] crypto/qat: support out of place SG list Kai Ji
2022-02-23  9:18                           ` [EXT] [dpdk-dev v12 0/9] drivers/qat: QAT symmetric crypto datapatch rework Akhil Goyal
