From: Kai Ji <kai.ji@intel.com>
To: dev@dpdk.org
Cc: gakhil@marvell.com, roy.fan.zhang@intel.com, Kai Ji <kai.ji@intel.com>
Subject: [dpdk-dev v9 2/9] crypto/qat: support symmetric build op request
Date: Sat, 19 Feb 2022 01:15:20 +0800
Message-ID: <20220218171527.56719-3-kai.ji@intel.com>
In-Reply-To: <20220218171527.56719-1-kai.ji@intel.com>

This patch adds the common inline functions used by the QAT symmetric
crypto driver to process crypto ops, and the implementation of the
build op request functions for QAT generation 1.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
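Note for reviewers (not part of the diff): op_bpi_cipher_decrypt(), moved
into the shared header below, relies on the CFB identity that decrypting a
single, possibly partial, block is the same as ECB-encrypting the IV and
XORing the result with the ciphertext. A minimal standalone sketch of that
trick follows; it creates its own AES-128-ECB context instead of using the
session's pre-initialised bpi_ctx, and the function name, key handling and
16-byte block size are illustrative assumptions only.

/*
 * Sketch only: CFB decrypt of one partial block via ECB + XOR.
 * Build with -lcrypto. 'len' must not exceed the 16-byte AES block.
 */
#include <openssl/evp.h>
#include <stdint.h>

static int
bpi_decrypt_block_sketch(const uint8_t *key, const uint8_t *iv,
		const uint8_t *src, uint8_t *dst, int len)
{
	uint8_t keystream[16];
	int out_len, i, ret = -1;
	EVP_CIPHER_CTX *ecb = EVP_CIPHER_CTX_new();

	if (ecb == NULL)
		return -1;

	/* ECB-encrypt (not decrypt!) the IV to recover the CFB keystream */
	if (EVP_EncryptInit_ex(ecb, EVP_aes_128_ecb(), NULL, key, NULL) == 1 &&
			EVP_EncryptUpdate(ecb, keystream, &out_len, iv, 16) == 1) {
		for (i = 0; i < len; i++)
			dst[i] = src[i] ^ keystream[i];
		ret = 0;
	}

	EVP_CIPHER_CTX_free(ecb);
	return ret;
}

Since the driver keeps a ready ECB context in the session (bpi_ctx), the
helper in this patch only needs the EVP_EncryptUpdate() and XOR steps.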
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 832 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 187 ++++-
 drivers/crypto/qat/qat_sym.c                 |  90 +-
 3 files changed, 1019 insertions(+), 90 deletions(-)
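A second reviewer note (again not part of the diff): the
qat_sym_convert_op_to_vec_*() helpers added below hand their result back as
the .raw value of union rte_crypto_sym_ofs from rte_crypto_sym.h (four
packed 16-bit cipher/auth head and tail offsets), and the gen1 build
functions treat a returned ofs.raw of UINT64_MAX as failure rather than
checking a signed return code. A small sketch of that packing; the function
name and offset values are made up for illustration:

#include <rte_crypto_sym.h>
#include <stdint.h>

static uint64_t
pack_offsets_sketch(void)
{
	union rte_crypto_sym_ofs ofs;

	ofs.raw = 0;
	ofs.ofs.cipher.head = 16; /* bytes skipped before ciphering    */
	ofs.ofs.cipher.tail = 0;  /* bytes left unciphered at the end  */
	ofs.ofs.auth.head = 0;
	ofs.ofs.auth.tail = 4;    /* e.g. a trailing digest not hashed */

	/* The cipher/auth/aead converters in this patch return ofs.raw
	 * like this on success, and UINT64_MAX on malformed input after
	 * setting op->status. */
	return ofs.raw;
}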

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 67a4d2cb2c..1130e0e76f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #ifndef _QAT_CRYPTO_PMD_GENS_H_
@@ -8,14 +8,844 @@
 #include <rte_cryptodev.h>
 #include "qat_crypto.h"
 #include "qat_sym_session.h"
+#include "qat_sym.h"
+
+#define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
+	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+	(ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+	ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline int
+op_bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+								<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static __rte_always_inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len && ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely((sym_op->m_dst != NULL)
+				&& (sym_op->m_dst != sym_op->m_src)))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+			dst, last_block_len);
+#endif
+		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+			last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+			dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static __rte_always_inline int
+qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+		if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+				(op->sym->auth.data.length % BYTE_LENGTH != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int
+qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
+		struct rte_crypto_op *op)
+{
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+		ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+			((op->sym->cipher.data.offset %
+			BYTE_LENGTH) != 0)))
+			return -EINVAL;
+		return 1;
+	}
+	return 0;
+}
+
+static __rte_always_inline int32_t
+qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+		void *opaque, struct qat_sym_op_cookie *cookie,
+		struct rte_crypto_vec *src_vec, uint16_t n_src,
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+{
+	struct qat_sgl *list;
+	uint32_t i;
+	uint32_t tl_src = 0, total_len_src, total_len_dst;
+	uint64_t src_data_start = 0, dst_data_start = 0;
+	int is_sgl = n_src > 1 || n_dst > 1;
+
+	if (unlikely(n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER))
+		return -1;
+
+	if (likely(!is_sgl)) {
+		src_data_start = src_vec[0].iova;
+		tl_src = total_len_src =
+				src_vec[0].len;
+		if (unlikely(n_dst)) { /* oop */
+			total_len_dst = dst_vec[0].len;
+
+			dst_data_start = dst_vec[0].iova;
+			if (unlikely(total_len_src != total_len_dst))
+				return -EINVAL;
+		} else {
+			dst_data_start = src_data_start;
+			total_len_dst = tl_src;
+		}
+	} else { /* sgl */
+		total_len_dst = total_len_src = 0;
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+			QAT_COMN_PTR_TYPE_SGL);
+
+		list = (struct qat_sgl *)&cookie->qat_sgl_src;
+		for (i = 0; i < n_src; i++) {
+			list->buffers[i].len = src_vec[i].len;
+			list->buffers[i].resrvd = 0;
+			list->buffers[i].addr = src_vec[i].iova;
+			if (tl_src + src_vec[i].len > UINT32_MAX) {
+				QAT_DP_LOG(ERR, "Message too long");
+				return -1;
+			}
+			tl_src += src_vec[i].len;
+		}
+
+		list->num_bufs = i;
+		src_data_start = cookie->qat_sgl_src_phys_addr;
+
+		if (unlikely(n_dst > 0)) { /* oop sgl */
+			uint32_t tl_dst = 0;
+
+			list = (struct qat_sgl *)&cookie->qat_sgl_dst;
+
+			for (i = 0; i < n_dst; i++) {
+				list->buffers[i].len = dst_vec[i].len;
+				list->buffers[i].resrvd = 0;
+				list->buffers[i].addr = dst_vec[i].iova;
+				if (tl_dst + dst_vec[i].len > UINT32_MAX) {
+					QAT_DP_LOG(ERR, "Message too long");
+					return -ENOTSUP;
+				}
+
+				tl_dst += dst_vec[i].len;
+			}
+
+			if (tl_src != tl_dst)
+				return -EINVAL;
+			list->num_bufs = i;
+			dst_data_start = cookie->qat_sgl_dst_phys_addr;
+		} else
+			dst_data_start = src_data_start;
+	}
+
+	req->comn_mid.src_data_addr = src_data_start;
+	req->comn_mid.dest_data_addr = dst_data_start;
+	req->comn_mid.src_length = total_len_src;
+	req->comn_mid.dst_length = total_len_dst;
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+
+	return tl_src;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad __rte_unused,
+		struct rte_crypto_va_iova_ptr *digest __rte_unused)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int n_src = 0;
+	int ret;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->cipher.data.length >> 3;
+		cipher_ofs = op->sym->cipher.data.offset >> 3;
+		break;
+	case 0:
+		if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device.
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs,
+			cipher_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv __rte_unused,
+		struct rte_crypto_va_iova_ptr *auth_iv,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t auth_ofs = 0, auth_len = 0;
+	int n_src, ret;
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+				ctx->auth_iv.offset);
+		auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+				ctx->auth_iv.offset);
+		break;
+	case 0:
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = rte_crypto_op_ctod_offset(op, void *,
+					ctx->auth_iv.offset);
+			auth_iv->iova = rte_crypto_op_ctophys_offset(op,
+					ctx->auth_iv.offset);
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+			auth_iv->va = NULL;
+			auth_iv->iova = 0;
+		}
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return UINT64_MAX;
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs,
+			auth_ofs + auth_len, in_sgl->vec,
+			QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs,
+				auth_ofs + auth_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+
+		if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	return 0;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	union rte_crypto_sym_ofs ofs;
+	uint32_t min_ofs = 0, max_len = 0;
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	uint32_t auth_len = 0, auth_ofs = 0;
+	int is_oop = (op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src);
+	int is_sgl = op->sym->m_src->nb_segs > 1;
+	int n_src;
+	int ret;
+
+	if (unlikely(is_oop))
+		is_sgl |= op->sym->m_dst->nb_segs > 1;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->auth_iv.offset);
+	auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->auth_iv.offset);
+	digest->va = (void *)op->sym->auth.digest.data;
+	digest->iova = op->sym->auth.digest.phys_addr;
+
+	ret = qat_cipher_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		cipher_len = op->sym->aead.data.length >> 3;
+		cipher_ofs = op->sym->aead.data.offset >> 3;
+		break;
+	case 0:
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	ret = qat_auth_is_len_in_bits(ctx, op);
+	switch (ret) {
+	case 1:
+		auth_len = op->sym->auth.data.length >> 3;
+		auth_ofs = op->sym->auth.data.offset >> 3;
+		break;
+	case 0:
+		auth_len = op->sym->auth.data.length;
+		auth_ofs = op->sym->auth.data.offset;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+	max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len);
+
+	/* digest in buffer check. Needed only for wireless algos */
+	if (ret == 1) {
+		/* Handle digest-encrypted cases, i.e.
+		 * auth-gen-then-cipher-encrypt and
+		 * cipher-decrypt-then-auth-verify
+		 */
+		uint64_t auth_end_iova;
+
+		if (unlikely(is_sgl)) {
+			uint32_t remaining_off = auth_ofs + auth_len;
+			struct rte_mbuf *sgl_buf = (is_oop ? op->sym->m_dst :
+				op->sym->m_src);
+
+			while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+					&& sgl_buf->next != NULL) {
+				remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+				sgl_buf = sgl_buf->next;
+			}
+
+			auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+				sgl_buf, remaining_off);
+		} else
+			auth_end_iova = (is_oop ?
+				rte_pktmbuf_iova(op->sym->m_dst) :
+				rte_pktmbuf_iova(op->sym->m_src)) + auth_ofs +
+					auth_len;
+
+		/* Then check if digest-encrypted conditions are met */
+		if ((auth_ofs + auth_len < cipher_ofs + cipher_len) &&
+				(digest->iova == auth_end_iova))
+			max_len = RTE_MAX(max_len, auth_ofs + auth_len +
+					ctx->digest_length);
+	}
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+	in_sgl->num = n_src;
+
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs,
+				max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return -1;
+		}
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	ofs.ofs.cipher.head = cipher_ofs;
+	ofs.ofs.cipher.tail = max_len - cipher_ofs - cipher_len;
+	ofs.ofs.auth.head = auth_ofs;
+	ofs.ofs.auth.tail = max_len - auth_ofs - auth_len;
+
+	return ofs.raw;
+}
+
+static __rte_always_inline uint64_t
+qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
+		struct qat_sym_session *ctx,
+		struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl,
+		struct rte_crypto_va_iova_ptr *cipher_iv,
+		struct rte_crypto_va_iova_ptr *auth_iv_or_aad,
+		struct rte_crypto_va_iova_ptr *digest)
+{
+	uint32_t cipher_len = 0, cipher_ofs = 0;
+	int32_t n_src = 0;
+
+	cipher_iv->va = rte_crypto_op_ctod_offset(op, void *,
+			ctx->cipher_iv.offset);
+	cipher_iv->iova = rte_crypto_op_ctophys_offset(op,
+			ctx->cipher_iv.offset);
+	auth_iv_or_aad->va = (void *)op->sym->aead.aad.data;
+	auth_iv_or_aad->iova = op->sym->aead.aad.phys_addr;
+	digest->va = (void *)op->sym->aead.digest.data;
+	digest->iova = op->sym->aead.digest.phys_addr;
+
+	cipher_len = op->sym->aead.data.length;
+	cipher_ofs = op->sym->aead.data.offset;
+
+	n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, cipher_ofs, cipher_len,
+			in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER);
+	if (n_src < 0 || n_src > op->sym->m_src->nb_segs) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return UINT64_MAX;
+	}
+	in_sgl->num = n_src;
+
+	/* Out-Of-Place operation */
+	if (unlikely((op->sym->m_dst != NULL) &&
+			(op->sym->m_dst != op->sym->m_src))) {
+		int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, cipher_ofs,
+				cipher_len, out_sgl->vec,
+				QAT_SYM_SGL_MAX_NUMBER);
+		if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) {
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			return UINT64_MAX;
+		}
+
+		out_sgl->num = n_dst;
+	} else
+		out_sgl->num = 0;
+
+	return 0;
+}
+
+static __rte_always_inline void
+qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv_ptr->va,
+				iv_len);
+	else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr = iv_ptr->iova;
+	}
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+	uint32_t i;
+
+	for (i = 0; i < n; i++)
+		sta[i] = status;
+}
+
+static __rte_always_inline void
+enqueue_one_cipher_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+
+	/* cipher IV */
+	qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, auth_iv->va,
+				ctx->auth_iv.length);
+		break;
+	default:
+		break;
+	}
+}
+
+static __rte_always_inline int
+enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_vec *src_vec,
+	uint16_t n_src_vecs,
+	struct rte_crypto_vec *dst_vec,
+	uint16_t n_dst_vecs,
+	struct rte_crypto_va_iova_ptr *cipher_iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct rte_crypto_vec *cvec = n_dst_vecs > 0 ?
+			dst_vec : src_vec;
+	rte_iova_t auth_iova_end;
+	int cipher_len, auth_len;
+	int is_sgl = n_src_vecs > 1 || n_dst_vecs > 1;
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+	cipher_len = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+	if (unlikely(cipher_len < 0 || auth_len < 0))
+		return -1;
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = cipher_len;
+	qat_set_cipher_iv(cipher_param, cipher_iv, ctx->cipher_iv.length, req);
+
+	auth_param->auth_off = ofs.ofs.auth.head;
+	auth_param->auth_len = auth_len;
+	auth_param->auth_res_addr = digest->iova;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		auth_param->u1.aad_adr = auth_iv->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(is_sgl)) {
+		/* sgl */
+		int i = n_dst_vecs ? n_dst_vecs : n_src_vecs;
+		uint32_t remaining_off = data_len - ofs.ofs.auth.tail;
+
+		while (remaining_off >= cvec->len && i >= 1) {
+			i--;
+			remaining_off -= cvec->len;
+			cvec++;
+		}
+
+		auth_iova_end = cvec->iova + remaining_off;
+	} else
+		auth_iova_end = cvec[0].iova + auth_param->auth_off +
+			auth_param->auth_len;
+
+	/* Then check if digest-encrypted conditions are met */
+	if ((auth_param->auth_off + auth_param->auth_len <
+		cipher_param->cipher_offset + cipher_param->cipher_length) &&
+			(digest->iova == auth_iova_end)) {
+		/* Handle partial digest encryption */
+		if (cipher_param->cipher_offset + cipher_param->cipher_length <
+			auth_param->auth_off + auth_param->auth_len +
+				ctx->digest_length && !is_sgl)
+			req->comn_mid.dst_length = req->comn_mid.src_length =
+				auth_param->auth_off + auth_param->auth_len +
+					ctx->digest_length;
+		struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+enqueue_one_aead_job_gen1(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
 
 extern struct rte_cryptodev_ops qat_sym_crypto_ops_gen1;
 extern struct rte_cryptodev_ops qat_asym_crypto_ops_gen1;
 
+/* -----------------GEN 1 sym crypto op data path APIs ---------------- */
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+	uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie);
+
 /* -----------------GENx control path APIs ---------------- */
 uint64_t
 qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 void
 qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
 		uint8_t hash_flag);
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 90b3ec803c..c429825a67 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -179,6 +179,191 @@ qat_sym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, NULL, NULL);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, NULL, NULL);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
+			total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+int
+qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
+			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
 #ifdef RTE_LIB_SECURITY
 
 #define QAT_SECURITY_SYM_CAPABILITIES					\
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 00ec703754..f814bf8f75 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -11,93 +11,7 @@
 #include <rte_byteorder.h>
 
 #include "qat_sym.h"
-
-
-/** Decrypt a single partial block
- *  Depends on openssl libcrypto
- *  Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
-		uint8_t *iv, int ivlen, int srclen,
-		void *bpi_ctx)
-{
-	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
-	int encrypted_ivlen;
-	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
-	uint8_t *encr = encrypted_iv;
-
-	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
-	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
-								<= 0)
-		goto cipher_decrypt_err;
-
-	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
-		*dst = *src ^ *encr;
-
-	return 0;
-
-cipher_decrypt_err:
-	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
-	return -EINVAL;
-}
-
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_sym_session *ctx,
-				struct rte_crypto_op *op)
-{
-	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	uint8_t last_block_len = block_len > 0 ?
-			sym_op->cipher.data.length % block_len : 0;
-
-	if (last_block_len &&
-			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
-		/* Decrypt last block */
-		uint8_t *last_block, *dst, *iv;
-		uint32_t last_block_offset = sym_op->cipher.data.offset +
-				sym_op->cipher.data.length - last_block_len;
-		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
-				uint8_t *, last_block_offset);
-
-		if (unlikely((sym_op->m_dst != NULL)
-				&& (sym_op->m_dst != sym_op->m_src)))
-			/* out-of-place operation (OOP) */
-			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
-						uint8_t *, last_block_offset);
-		else
-			dst = last_block;
-
-		if (last_block_len < sym_op->cipher.data.length)
-			/* use previous block ciphertext as IV */
-			iv = last_block - block_len;
-		else
-			/* runt block, i.e. less than one full block */
-			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
-			dst, last_block_len);
-#endif
-		bpi_cipher_decrypt(last_block, dst, iv, block_len,
-				last_block_len, ctx->bpi_ctx);
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
-			last_block, last_block_len);
-		if (sym_op->m_dst != NULL)
-			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
-			dst, last_block_len);
-#endif
-	}
-
-	return sym_op->cipher.data.length - last_block_len;
-}
+#include "dev/qat_crypto_pmd_gens.h"
 
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
-- 
2.17.1


