From: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
To: herbert@gondor.apana.org.au
Cc: linux-crypto@vger.kernel.org, qat-linux@intel.com,
	Marco Chiappero <marco.chiappero@intel.com>,
	Mateusz Polrola <mateuszx.potrola@intel.com>,
	Giovanni Cabiddu <giovanni.cabiddu@intel.com>,
	Andy Shevchenko <andriy.shevchenko@linux.intel.com>,
	Indrasena Reddy Gali <indrasena.reddygali@intel.com>
Subject: [PATCH 01/31] crypto: qat - update IV in software
Date: Mon, 12 Oct 2020 21:38:17 +0100
Message-ID: <20201012203847.340030-2-giovanni.cabiddu@intel.com>
In-Reply-To: <20201012203847.340030-1-giovanni.cabiddu@intel.com>

From: Marco Chiappero <marco.chiappero@intel.com>

Do IV update calculations in software for AES-CBC and AES-CTR.

This allows the IV to be embedded in the request descriptor and removes
the allocation of the IV buffer in the data path.

In addition, this change enables support for QAT devices that are not
capable of updating the IV buffer when performing an AES-CBC or AES-CTR
operation.
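
For reference, the software update follows the standard definition of the
two modes: for AES-CTR the 128-bit big-endian counter is advanced by the
number of AES blocks processed, with a carry from the low into the high
64-bit half on overflow; for AES-CBC the next IV is the last ciphertext
block, copied out of the destination scatterlist on encryption and out of
the source scatterlist on decryption. Below is a condensed, illustrative
sketch of the counter arithmetic only (the function name is made up for
this note and it assumes the usual kernel headers; the actual helper added
by this patch is qat_alg_update_iv_ctr_mode()):

	/* assumes <linux/types.h>, <linux/kernel.h>, <crypto/aes.h> */
	static void ctr_iv_advance(__be64 iv[2], unsigned int cryptlen)
	{
		/* iv[0] is the high half, iv[1] the low half of the counter */
		u64 hi = be64_to_cpu(iv[0]);
		u64 lo = be64_to_cpu(iv[1]);
		u64 lo_prev = lo;

		/* one increment per AES block, partial blocks round up */
		lo += DIV_ROUND_UP(cryptlen, AES_BLOCK_SIZE);
		if (lo < lo_prev)	/* low half wrapped: carry */
			hi++;

		iv[0] = cpu_to_be64(hi);
		iv[1] = cpu_to_be64(lo);
	}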

Signed-off-by: Marco Chiappero <marco.chiappero@intel.com>
Co-developed-by: Mateusz Polrola <mateuszx.potrola@intel.com>
Signed-off-by: Mateusz Polrola <mateuszx.potrola@intel.com>
Co-developed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Tested-by: Indrasena Reddy Gali <indrasena.reddygali@intel.com>
---
 drivers/crypto/qat/qat_common/qat_algs.c   | 136 ++++++++++++---------
 drivers/crypto/qat/qat_common/qat_crypto.h |  11 +-
 2 files changed, 89 insertions(+), 58 deletions(-)

diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index d552dbcfe0a0..a38afc61f6d2 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -11,6 +11,7 @@
 #include <crypto/hmac.h>
 #include <crypto/algapi.h>
 #include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
 #include <crypto/xts.h>
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
@@ -90,6 +91,7 @@ struct qat_alg_skcipher_ctx {
 	struct qat_crypto_instance *inst;
 	struct crypto_skcipher *ftfm;
 	bool fallback;
+	int mode;
 };
 
 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -214,24 +216,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 	return 0;
 }
 
-static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
-{
-	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
-					   ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
-				       ICP_QAT_FW_LA_UPDATE_STATE);
-}
-
-static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
-{
-	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
-					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
-	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
-				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
-}
-
-static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
-				    int aead)
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
 {
 	header->hdr_flags =
 		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -241,12 +226,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
 					    QAT_COMN_PTR_TYPE_SGL);
 	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
 				  ICP_QAT_FW_LA_PARTIAL_NONE);
-	if (aead)
-		qat_alg_init_hdr_no_iv_updt(header);
-	else
-		qat_alg_init_hdr_iv_updt(header);
+	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
 	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
 				ICP_QAT_FW_LA_NO_PROTO);
+	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
 }
 
 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
@@ -281,7 +266,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
 		return -EFAULT;
 
 	/* Request setup */
-	qat_alg_init_common_hdr(header, 1);
+	qat_alg_init_common_hdr(header);
 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -368,7 +353,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
 		return -EFAULT;
 
 	/* Request setup */
-	qat_alg_init_common_hdr(header, 1);
+	qat_alg_init_common_hdr(header);
 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -432,7 +417,7 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
 
 	memcpy(cd->aes.key, key, keylen);
-	qat_alg_init_common_hdr(header, 0);
+	qat_alg_init_common_hdr(header);
 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
 	cd_pars->u.s.content_desc_params_sz =
 				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
@@ -787,6 +772,61 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
 	areq->base.complete(&areq->base, res);
 }
 
+static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
+{
+	struct skcipher_request *sreq = qat_req->skcipher_req;
+	u64 iv_lo_prev;
+	u64 iv_lo;
+	u64 iv_hi;
+
+	memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
+
+	iv_lo = be64_to_cpu(qat_req->iv_lo);
+	iv_hi = be64_to_cpu(qat_req->iv_hi);
+
+	iv_lo_prev = iv_lo;
+	iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
+	if (iv_lo < iv_lo_prev)
+		iv_hi++;
+
+	qat_req->iv_lo = cpu_to_be64(iv_lo);
+	qat_req->iv_hi = cpu_to_be64(iv_hi);
+}
+
+static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
+{
+	struct skcipher_request *sreq = qat_req->skcipher_req;
+	int offset = sreq->cryptlen - AES_BLOCK_SIZE;
+	struct scatterlist *sgl;
+
+	if (qat_req->encryption)
+		sgl = sreq->dst;
+	else
+		sgl = sreq->src;
+
+	scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
+}
+
+static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
+{
+	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+	switch (ctx->mode) {
+	case ICP_QAT_HW_CIPHER_CTR_MODE:
+		qat_alg_update_iv_ctr_mode(qat_req);
+		break;
+	case ICP_QAT_HW_CIPHER_CBC_MODE:
+		qat_alg_update_iv_cbc_mode(qat_req);
+		break;
+	case ICP_QAT_HW_CIPHER_XTS_MODE:
+		break;
+	default:
+		dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
+			 ctx->mode);
+	}
+}
+
 static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
 				      struct qat_crypto_request *qat_req)
 {
@@ -794,16 +834,16 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
 	struct qat_crypto_instance *inst = ctx->inst;
 	struct skcipher_request *sreq = qat_req->skcipher_req;
 	u8 stat_filed = qat_resp->comn_resp.comn_status;
-	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
 	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
 
 	qat_alg_free_bufl(inst, qat_req);
 	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
 		res = -EINVAL;
 
+	if (qat_req->encryption)
+		qat_alg_update_iv(qat_req);
+
 	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
-	dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-			  qat_req->iv_paddr);
 
 	sreq->base.complete(&sreq->base, res);
 }
@@ -981,6 +1021,8 @@ static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
 {
 	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 
+	ctx->mode = mode;
+
 	if (ctx->enc_cd)
 		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
 	else
@@ -1035,23 +1077,14 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_bulk_req *msg;
-	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
 	int ret, ctr = 0;
 
 	if (req->cryptlen == 0)
 		return 0;
 
-	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
-					 &qat_req->iv_paddr, GFP_ATOMIC);
-	if (!qat_req->iv)
-		return -ENOMEM;
-
 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
-	if (unlikely(ret)) {
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
+	if (unlikely(ret))
 		return ret;
-	}
 
 	msg = &qat_req->req;
 	*msg = ctx->enc_fw_req;
@@ -1061,19 +1094,18 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	qat_req->encryption = true;
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = req->cryptlen;
 	cipher_param->cipher_offset = 0;
-	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
-	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
+	memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
+
 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
 	} while (ret == -EAGAIN && ctr++ < 10);
 
 	if (ret == -EAGAIN) {
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
 		return -EBUSY;
 	}
 	return -EINPROGRESS;
@@ -1113,23 +1145,14 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_bulk_req *msg;
-	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
 	int ret, ctr = 0;
 
 	if (req->cryptlen == 0)
 		return 0;
 
-	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
-					 &qat_req->iv_paddr, GFP_ATOMIC);
-	if (!qat_req->iv)
-		return -ENOMEM;
-
 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
-	if (unlikely(ret)) {
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
+	if (unlikely(ret))
 		return ret;
-	}
 
 	msg = &qat_req->req;
 	*msg = ctx->dec_fw_req;
@@ -1139,19 +1162,20 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	qat_req->encryption = false;
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = req->cryptlen;
 	cipher_param->cipher_offset = 0;
-	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
-	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
+	memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
+
+	qat_alg_update_iv(qat_req);
+
 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
 	} while (ret == -EAGAIN && ctr++ < 10);
 
 	if (ret == -EAGAIN) {
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
 		return -EBUSY;
 	}
 	return -EINPROGRESS;
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index 12682d1e9f5f..8d11e94cbf08 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -3,6 +3,7 @@
 #ifndef _QAT_CRYPTO_INSTANCE_H_
 #define _QAT_CRYPTO_INSTANCE_H_
 
+#include <crypto/aes.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include "adf_accel_devices.h"
@@ -44,8 +45,14 @@ struct qat_crypto_request {
 	struct qat_crypto_request_buffs buf;
 	void (*cb)(struct icp_qat_fw_la_resp *resp,
 		   struct qat_crypto_request *req);
-	void *iv;
-	dma_addr_t iv_paddr;
+	union {
+		struct {
+			__be64 iv_hi;
+			__be64 iv_lo;
+		};
+		u8 iv[AES_BLOCK_SIZE];
+	};
+	bool encryption;
 };
 
 #endif
-- 
2.26.2


Thread overview: 33+ messages
2020-10-12 20:38 [PATCH 00/31] crypto: qat - rework in preparation for qat_4xxx driver Giovanni Cabiddu
2020-10-12 20:38 ` Giovanni Cabiddu [this message]
2020-10-12 20:38 ` [PATCH 02/31] crypto: qat - mask device capabilities with soft straps Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 03/31] crypto: qat - num_rings_per_bank is device dependent Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 04/31] crypto: qat - fix configuration of iov threads Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 05/31] crypto: qat - split transport CSR access logic Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 06/31] crypto: qat - relocate GEN2 CSR access code Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 07/31] crypto: qat - abstract admin interface Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 08/31] crypto: qat - add packed to init admin structures Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 09/31] crypto: qat - rename ME in AE Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 10/31] crypto: qat - change admin sequence Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 11/31] crypto: qat - use admin mask to send fw constants Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 12/31] crypto: qat - update constants table Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 13/31] crypto: qat - remove writes into WQCFG Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 14/31] crypto: qat - remove unused macros in arbiter module Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 15/31] crypto: qat - abstract arbiter access Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 16/31] crypto: qat - add support for capability detection Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 17/31] crypto: qat - register crypto instances based on capability Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 18/31] crypto: qat - enable ring after pair is programmed Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 19/31] crypto: qat - abstract build ring base Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 20/31] crypto: qat - replace constant masks with GENMASK Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 21/31] crypto: qat - use BIT_ULL() - 1 pattern for masks Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 22/31] crypto: qat - abstract writes to arbiter enable Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 23/31] crypto: qat - remove hardcoded bank irq clear flag mask Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 24/31] crypto: qat - call functions in adf_sriov if available Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 25/31] crypto: qat - remove unnecessary void* casts Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 26/31] crypto: qat - change return value in adf_cfg_add_key_value_param() Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 27/31] crypto: qat - change return value in adf_cfg_key_val_get() Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 28/31] crypto: qat - refactor qat_crypto_create_instances() Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 29/31] crypto: qat - refactor qat_crypto_dev_config() Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 30/31] crypto: qat - allow for instances in different banks Giovanni Cabiddu
2020-10-12 20:38 ` [PATCH 31/31] crypto: qat - extend ae_mask Giovanni Cabiddu
2020-10-30  6:49 ` [PATCH 00/31] crypto: qat - rework in preparation for qat_4xxx driver Herbert Xu
