* [PATCH 0/6] Fix additional alignment issues in staging/ccree
From: Simon Sandström @ 2017-07-18 20:03 UTC (permalink / raw)
  To: gilad; +Cc: gregkh, linux-crypto, driverdev-devel, devel, Simon Sandström

Here are a few more patches that fix alignment issues in
staging/ccree. They include the patches that I sent previously, which
could not be applied, plus a few more fixes for issues that I found.
These patches should fix all remaining alignment warnings reported by
checkpatch.pl in staging/ccree.
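
For reference, these are checkpatch.pl "Alignment should match open
parenthesis" warnings; a minimal before/after sketch of the style
change, taken from the ssi_aead.c hunk in patch 1/6:

    /* before: continuation line indented with tabs, not aligned to the '(' */
    SSI_LOG_DEBUG("Clearing context @%p for %s\n",
            crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base));

    /* after: arguments aligned under the open parenthesis */
    SSI_LOG_DEBUG("Clearing context @%p for %s\n",
                  crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base));

The warnings can be reproduced from a kernel tree with something like
./scripts/checkpatch.pl -f drivers/staging/ccree/ssi_aead.c (-f checks
a source file in place rather than a patch).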

- Simon

---

Simon Sandström (6):
  staging: ccree: Fix alignment issues in ssi_aead.c
  staging: ccree: Fix alignment issues in ssi_hash.c
  staging: ccree: Fix alignment issues in ssi_buffer_mgr.c
  staging: ccree: Fix alignment issues in ssi_cipher.c
  staging: ccree: Fix alignment issues in ssi_ivgen.c
  staging: ccree: Fix alignment issues in ssi_request_mgr.c

 drivers/staging/ccree/ssi_aead.c        |  47 +++++++-------
 drivers/staging/ccree/ssi_buffer_mgr.c  |  40 +++++++-----
 drivers/staging/ccree/ssi_cipher.c      |   3 +-
 drivers/staging/ccree/ssi_hash.c        | 105 +++++++++++++++++---------------
 drivers/staging/ccree/ssi_ivgen.c       |   3 +-
 drivers/staging/ccree/ssi_request_mgr.c |   4 +-
 6 files changed, 112 insertions(+), 90 deletions(-)

-- 
2.11.0

* [PATCH 1/6] staging: ccree: Fix alignment issues in ssi_aead.c
From: Simon Sandström @ 2017-07-18 20:03 UTC (permalink / raw)
  To: gilad; +Cc: gregkh, linux-crypto, driverdev-devel, devel, Simon Sandström

Fixes checkpatch.pl alignment warnings.

Signed-off-by: Simon Sandström <simon@nikanor.nu>
---
 drivers/staging/ccree/ssi_aead.c | 47 +++++++++++++++++++++-------------------
 1 file changed, 25 insertions(+), 22 deletions(-)

diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index ea29b8a1a71d..ad53126d6705 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -96,7 +96,7 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
 	SSI_LOG_DEBUG("Clearing context @%p for %s\n",
-		crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base));
+		      crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base));
 
 	dev = &ctx->drvdata->plat_dev->dev;
 	/* Unmap enckey buffer */
@@ -163,7 +163,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
 
 	/* Allocate key buffer, cache line aligned */
 	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
-		&ctx->enckey_dma_addr, GFP_KERNEL);
+					 &ctx->enckey_dma_addr, GFP_KERNEL);
 	if (!ctx->enckey) {
 		SSI_LOG_ERR("Failed allocating key buffer\n");
 		goto init_failed;
@@ -239,7 +239,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
 
 	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
 		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
-			ctx->authsize) != 0) {
+			   ctx->authsize) != 0) {
 			SSI_LOG_DEBUG("Payload authentication failure, "
 				"(auth-size=%d, cipher=%d).\n",
 				ctx->authsize, ctx->cipher_mode);
@@ -378,7 +378,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
 static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
 {
 	SSI_LOG_DEBUG("enc_keylen=%u  authkeylen=%u\n",
-		ctx->enc_keylen, ctx->auth_keylen);
+		      ctx->enc_keylen, ctx->auth_keylen);
 
 	switch (ctx->auth_mode) {
 	case DRV_HASH_SHA1:
@@ -402,7 +402,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
 	if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
 		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
 			SSI_LOG_ERR("Invalid cipher(3DES) key size: %u\n",
-				ctx->enc_keylen);
+				    ctx->enc_keylen);
 			return -EINVAL;
 		}
 	} else { /* Default assumed to be AES ciphers */
@@ -410,7 +410,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
 		    (ctx->enc_keylen != AES_KEYSIZE_192) &&
 		    (ctx->enc_keylen != AES_KEYSIZE_256)) {
 			SSI_LOG_ERR("Invalid cipher(AES) key size: %u\n",
-				ctx->enc_keylen);
+				    ctx->enc_keylen);
 			return -EINVAL;
 		}
 	}
@@ -553,7 +553,8 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 	int seq_len = 0, rc = -EINVAL;
 
 	SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
-		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
+		      ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)),
+		      key, keylen);
 
 	/* STAT_PHASE_0: Init and sanity checks */
 
@@ -684,7 +685,7 @@ static int ssi_aead_setauthsize(
 
 #if SSI_CC_HAS_AES_CCM
 static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
-				      unsigned int authsize)
+				       unsigned int authsize)
 {
 	switch (authsize) {
 	case 8:
@@ -699,7 +700,7 @@ static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
 }
 
 static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
-				      unsigned int authsize)
+			       unsigned int authsize)
 {
 	switch (authsize) {
 	case 4:
@@ -1183,8 +1184,8 @@ static inline void ssi_aead_load_mlli_to_sram(
 		(req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
 		!req_ctx->is_single_pass)) {
 		SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
-			(unsigned int)ctx->drvdata->mlli_sram_addr,
-			req_ctx->mlli_params.mlli_len);
+			      (unsigned int)ctx->drvdata->mlli_sram_addr,
+			      req_ctx->mlli_params.mlli_len);
 		/* Copy MLLI table host-to-sram */
 		hw_desc_init(&desc[*seq_size]);
 		set_din_type(&desc[*seq_size], DMA_DLLI,
@@ -1328,7 +1329,8 @@ ssi_aead_xcbc_authenc(
 }
 
 static int validate_data_size(struct ssi_aead_ctx *ctx,
-	enum drv_crypto_direction direct, struct aead_request *req)
+			      enum drv_crypto_direction direct,
+			      struct aead_request *req)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int assoclen = req->assoclen;
@@ -1336,7 +1338,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
 			(req->cryptlen - ctx->authsize) : req->cryptlen;
 
 	if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) &&
-		(req->cryptlen < ctx->authsize)))
+		     (req->cryptlen < ctx->authsize)))
 		goto data_size_err;
 
 	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
@@ -1344,7 +1346,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
 	switch (ctx->flow_mode) {
 	case S_DIN_to_AES:
 		if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) &&
-			!IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
+			     !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
 			goto data_size_err;
 		if (ctx->cipher_mode == DRV_CIPHER_CCM)
 			break;
@@ -1960,15 +1962,16 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 	struct ssi_crypto_req ssi_req = {};
 
 	SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
-		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"), ctx, req, req->iv,
-		sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen);
+		      ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
+		      ctx, req, req->iv, sg_virt(req->src), req->src->offset,
+		      sg_virt(req->dst), req->dst->offset, req->cryptlen);
 
 	/* STAT_PHASE_0: Init and sanity checks */
 
 	/* Check data length according to mode */
 	if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
 		SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
-				req->cryptlen, req->assoclen);
+			    req->cryptlen, req->assoclen);
 		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
 		return -EINVAL;
 	}
@@ -1991,7 +1994,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
 		if (!areq_ctx->backup_giv) /*User none-generated IV*/
 			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
-				req->iv, CTR_RFC3686_IV_SIZE);
+			       req->iv, CTR_RFC3686_IV_SIZE);
 		/* Initialize counter portion of counter block */
 		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
 			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
@@ -2245,7 +2248,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
 }
 
 static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
-				      unsigned int authsize)
+			       unsigned int authsize)
 {
 	switch (authsize) {
 	case 4:
@@ -2264,7 +2267,7 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
 }
 
 static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
-				      unsigned int authsize)
+				       unsigned int authsize)
 {
 	SSI_LOG_DEBUG("ssi_rfc4106_gcm_setauthsize()  authsize %d\n", authsize);
 
@@ -2735,14 +2738,14 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
 		if (IS_ERR(t_alg)) {
 			rc = PTR_ERR(t_alg);
 			SSI_LOG_ERR("%s alg allocation failed\n",
-				 aead_algs[alg].driver_name);
+				    aead_algs[alg].driver_name);
 			goto fail1;
 		}
 		t_alg->drvdata = drvdata;
 		rc = crypto_register_aead(&t_alg->aead_alg);
 		if (unlikely(rc != 0)) {
 			SSI_LOG_ERR("%s alg registration failed\n",
-				t_alg->aead_alg.base.cra_driver_name);
+				    t_alg->aead_alg.base.cra_driver_name);
 			goto fail2;
 		} else {
 			list_add_tail(&t_alg->entry, &aead_handle->aead_list);
-- 
2.11.0

* [PATCH 2/6] staging: ccree: Fix alignment issues in ssi_hash.c
From: Simon Sandström @ 2017-07-18 20:03 UTC (permalink / raw)
  To: gilad; +Cc: gregkh, linux-crypto, driverdev-devel, devel, Simon Sandström

Fixes checkpatch.pl alignment warnings.

Signed-off-by: Simon Sandström <simon@nikanor.nu>
---
 drivers/staging/ccree/ssi_hash.c | 105 +++++++++++++++++++++------------------
 1 file changed, 56 insertions(+), 49 deletions(-)

diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index fba0643e78fa..a5b3e9bebd95 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -70,8 +70,8 @@ static void ssi_hash_create_xcbc_setup(
 	unsigned int *seq_size);
 
 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
-				  struct cc_hw_desc desc[],
-				  unsigned int *seq_size);
+				       struct cc_hw_desc desc[],
+				       unsigned int *seq_size);
 
 struct ssi_hash_alg {
 	struct list_head entry;
@@ -117,8 +117,8 @@ static void ssi_hash_create_data_desc(
 static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
 {
 	if (unlikely((mode == DRV_HASH_MD5) ||
-		(mode == DRV_HASH_SHA384) ||
-		(mode == DRV_HASH_SHA512))) {
+		     (mode == DRV_HASH_SHA384) ||
+		     (mode == DRV_HASH_SHA512))) {
 		set_bytes_swap(desc, 1);
 	} else {
 		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
@@ -135,7 +135,7 @@ static int ssi_hash_map_result(struct device *dev,
 			       DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
 		SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n",
-			digestsize);
+			    digestsize);
 		return -ENOMEM;
 	}
 	SSI_LOG_DEBUG("Mapped digest result buffer %u B "
@@ -200,12 +200,12 @@ static int ssi_hash_map_request(struct device *dev,
 	state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
 		SSI_LOG_ERR("Mapping digest len %d B at va=%pK for DMA failed\n",
-		ctx->inter_digestsize, state->digest_buff);
+			    ctx->inter_digestsize, state->digest_buff);
 		goto fail3;
 	}
 	SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=%pad\n",
-		ctx->inter_digestsize, state->digest_buff,
-		state->digest_buff_dma_addr);
+		      ctx->inter_digestsize, state->digest_buff,
+		      state->digest_buff_dma_addr);
 
 	if (is_hmac) {
 		dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
@@ -249,12 +249,12 @@ static int ssi_hash_map_request(struct device *dev,
 		state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
 			SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n",
-			HASH_LEN_SIZE, state->digest_bytes_len);
+				    HASH_LEN_SIZE, state->digest_bytes_len);
 			goto fail4;
 		}
 		SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=%pad\n",
-			HASH_LEN_SIZE, state->digest_bytes_len,
-			state->digest_bytes_len_dma_addr);
+			      HASH_LEN_SIZE, state->digest_bytes_len,
+			      state->digest_bytes_len_dma_addr);
 	} else {
 		state->digest_bytes_len_dma_addr = 0;
 	}
@@ -263,12 +263,13 @@ static int ssi_hash_map_request(struct device *dev,
 		state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
 			SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n",
-			ctx->inter_digestsize, state->opad_digest_buff);
+				    ctx->inter_digestsize,
+				    state->opad_digest_buff);
 			goto fail5;
 		}
 		SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=%pad\n",
-			ctx->inter_digestsize, state->opad_digest_buff,
-			state->opad_digest_dma_addr);
+			      ctx->inter_digestsize, state->opad_digest_buff,
+			      state->opad_digest_dma_addr);
 	} else {
 		state->opad_digest_dma_addr = 0;
 	}
@@ -602,7 +603,7 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
 	if (unlikely(rc)) {
 		if (rc == 1) {
 			SSI_LOG_DEBUG(" data size not require HW update %x\n",
-				     nbytes);
+				      nbytes);
 			/* No hardware updates are required */
 			return 0;
 		}
@@ -1145,17 +1146,17 @@ static int ssi_hash_setkey(void *hash,
 
 	if (ctx->key_params.key_dma_addr) {
 		dma_unmap_single(&ctx->drvdata->plat_dev->dev,
-				ctx->key_params.key_dma_addr,
-				ctx->key_params.keylen, DMA_TO_DEVICE);
+				 ctx->key_params.key_dma_addr,
+				 ctx->key_params.keylen, DMA_TO_DEVICE);
 		SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
-				ctx->key_params.key_dma_addr,
-				ctx->key_params.keylen);
+			      ctx->key_params.key_dma_addr,
+			      ctx->key_params.keylen);
 	}
 	return rc;
 }
 
 static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
-			const u8 *key, unsigned int keylen)
+			   const u8 *key, unsigned int keylen)
 {
 	struct ssi_crypto_req ssi_req = {};
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -1232,18 +1233,18 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
 
 	dma_unmap_single(&ctx->drvdata->plat_dev->dev,
-			ctx->key_params.key_dma_addr,
-			ctx->key_params.keylen, DMA_TO_DEVICE);
+			 ctx->key_params.key_dma_addr,
+			 ctx->key_params.keylen, DMA_TO_DEVICE);
 	SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
-			ctx->key_params.key_dma_addr,
-			ctx->key_params.keylen);
+		      ctx->key_params.key_dma_addr,
+		      ctx->key_params.keylen);
 
 	return rc;
 }
 
 #if SSI_CC_HAS_CMAC
 static int ssi_cmac_setkey(struct crypto_ahash *ahash,
-			const u8 *key, unsigned int keylen)
+			   const u8 *key, unsigned int keylen)
 {
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
@@ -1316,22 +1317,22 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
 	ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
 		SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n",
-			sizeof(ctx->digest_buff), ctx->digest_buff);
+			    sizeof(ctx->digest_buff), ctx->digest_buff);
 		goto fail;
 	}
 	SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=%pad\n",
-		sizeof(ctx->digest_buff), ctx->digest_buff,
+		      sizeof(ctx->digest_buff), ctx->digest_buff,
 		      ctx->digest_buff_dma_addr);
 
 	ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
 		SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n",
-			sizeof(ctx->opad_tmp_keys_buff),
-			ctx->opad_tmp_keys_buff);
+			    sizeof(ctx->opad_tmp_keys_buff),
+			    ctx->opad_tmp_keys_buff);
 		goto fail;
 	}
 	SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
-		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
+		      sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
 		      ctx->opad_tmp_keys_dma_addr);
 
 	ctx->is_hmac = false;
@@ -1353,7 +1354,7 @@ static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
 			container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				sizeof(struct ahash_req_ctx));
+				 sizeof(struct ahash_req_ctx));
 
 	ctx->hash_mode = ssi_alg->hash_mode;
 	ctx->hw_mode = ssi_alg->hw_mode;
@@ -1394,7 +1395,7 @@ static int ssi_mac_update(struct ahash_request *req)
 	if (unlikely(rc)) {
 		if (rc == 1) {
 			SSI_LOG_DEBUG(" data size not require HW update %x\n",
-				     req->nbytes);
+				      req->nbytes);
 			/* No hardware updates are required */
 			return 0;
 		}
@@ -1837,7 +1838,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
 }
 
 static int ssi_ahash_setkey(struct crypto_ahash *ahash,
-			const u8 *key, unsigned int keylen)
+			    const u8 *key, unsigned int keylen)
 {
 	return ssi_hash_setkey((void *)ahash, key, keylen, false);
 }
@@ -2119,7 +2120,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 
 	/* Copy-to-sram digest-len */
 	ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
-		ARRAY_SIZE(digest_len_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(digest_len_init),
+				     larval_seq, &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2130,7 +2132,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 #if (DX_DEV_SHA_MAX > 256)
 	/* Copy-to-sram digest-len for sha384/512 */
 	ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
-		ARRAY_SIZE(digest_len_sha512_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(digest_len_sha512_init),
+				     larval_seq, &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2144,7 +2147,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 
 	/* Copy-to-sram initial SHA* digests */
 	ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
-		ARRAY_SIZE(md5_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(md5_init), larval_seq,
+				     &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2152,7 +2156,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 	larval_seq_len = 0;
 
 	ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
-		ARRAY_SIZE(sha1_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(sha1_init), larval_seq,
+				     &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2160,7 +2165,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 	larval_seq_len = 0;
 
 	ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
-		ARRAY_SIZE(sha224_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(sha224_init), larval_seq,
+				     &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2168,7 +2174,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 	larval_seq_len = 0;
 
 	ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
-		ARRAY_SIZE(sha256_init), larval_seq, &larval_seq_len);
+				     ARRAY_SIZE(sha256_init), larval_seq,
+				     &larval_seq_len);
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0))
 		goto init_digest_const_err;
@@ -2182,10 +2189,10 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 		const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];
 
 		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
-			larval_seq, &larval_seq_len);
+					     larval_seq, &larval_seq_len);
 		sram_buff_ofs += sizeof(u32);
 		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
-			larval_seq, &larval_seq_len);
+					     larval_seq, &larval_seq_len);
 		sram_buff_ofs += sizeof(u32);
 	}
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
@@ -2200,10 +2207,10 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 		const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];
 
 		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
-			larval_seq, &larval_seq_len);
+					     larval_seq, &larval_seq_len);
 		sram_buff_ofs += sizeof(u32);
 		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
-			larval_seq, &larval_seq_len);
+					     larval_seq, &larval_seq_len);
 		sram_buff_ofs += sizeof(u32);
 	}
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
@@ -2228,7 +2235,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
 	hash_handle = kzalloc(sizeof(struct ssi_hash_handle), GFP_KERNEL);
 	if (!hash_handle) {
 		SSI_LOG_ERR("kzalloc failed to allocate %zu B\n",
-			sizeof(struct ssi_hash_handle));
+			    sizeof(struct ssi_hash_handle));
 		rc = -ENOMEM;
 		goto fail;
 	}
@@ -2300,7 +2307,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
 		if (IS_ERR(t_alg)) {
 			rc = PTR_ERR(t_alg);
 			SSI_LOG_ERR("%s alg allocation failed\n",
-				 driver_hash[alg].driver_name);
+				    driver_hash[alg].driver_name);
 			goto fail;
 		}
 		t_alg->drvdata = drvdata;
@@ -2346,8 +2353,8 @@ int ssi_hash_free(struct ssi_drvdata *drvdata)
 }
 
 static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
-				  struct cc_hw_desc desc[],
-				  unsigned int *seq_size)
+				       struct cc_hw_desc desc[],
+				       unsigned int *seq_size)
 {
 	unsigned int idx = *seq_size;
 	struct ahash_req_ctx *state = ahash_request_ctx(areq);
@@ -2404,8 +2411,8 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
 }
 
 static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
-				  struct cc_hw_desc desc[],
-				  unsigned int *seq_size)
+				       struct cc_hw_desc desc[],
+				       unsigned int *seq_size)
 {
 	unsigned int idx = *seq_size;
 	struct ahash_req_ctx *state = ahash_request_ctx(areq);
-- 
2.11.0

* [PATCH 3/6] staging: ccree: Fix alignment issues in ssi_buffer_mgr.c
From: Simon Sandström @ 2017-07-18 20:03 UTC (permalink / raw)
  To: gilad; +Cc: gregkh, linux-crypto, driverdev-devel, devel, Simon Sandström

Fixes checkpatch.pl alignment warnings.

Signed-off-by: Simon Sandström <simon@nikanor.nu>
---
 drivers/staging/ccree/ssi_buffer_mgr.c | 40 ++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 6579a54f9dc4..63936091d524 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -371,7 +371,7 @@ static int ssi_buffer_mgr_map_scatterlist(
 		*mapped_nents = 1;
 	} else {  /*sg_is_last*/
 		*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
-						     &is_chained);
+						      &is_chained);
 		if (*nents > max_sg_nents) {
 			*nents = 0;
 			SSI_LOG_ERR("Too many fragments. current %d max %d\n",
@@ -393,9 +393,9 @@ static int ssi_buffer_mgr_map_scatterlist(
 			 * must have the same nents before and after map
 			 */
 			*mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
-								 sg,
-								 *nents,
-								 direction);
+								  sg,
+								  *nents,
+								  direction);
 			if (unlikely(*mapped_nents != *nents)) {
 				*nents = *mapped_nents;
 				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
@@ -783,8 +783,8 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
 		goto chain_iv_exit;
 	}
 
-	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
-		hw_iv_size, DMA_BIDIRECTIONAL);
+	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, hw_iv_size,
+						       DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
 		SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
 			    hw_iv_size, req->iv);
@@ -1323,8 +1323,9 @@ int ssi_buffer_mgr_map_aead_request(
 				req->cryptlen :
 				(req->cryptlen - authsize);
 
-	areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
-		areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
+	areq_ctx->mac_buf_dma_addr = dma_map_single(dev, areq_ctx->mac_buf,
+						    MAX_MAC_SIZE,
+						    DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
 		SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
 			    MAX_MAC_SIZE, areq_ctx->mac_buf);
@@ -1334,8 +1335,9 @@ int ssi_buffer_mgr_map_aead_request(
 
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 		areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
-			(areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
-			AES_BLOCK_SIZE, DMA_TO_DEVICE);
+							    (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
+							    AES_BLOCK_SIZE,
+							    DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
 			SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK "
@@ -1356,7 +1358,9 @@ int ssi_buffer_mgr_map_aead_request(
 #if SSI_CC_HAS_AES_GCM
 	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 		areq_ctx->hkey_dma_addr = dma_map_single(dev,
-			areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
+							 areq_ctx->hkey,
+							 AES_BLOCK_SIZE,
+							 DMA_BIDIRECTIONAL);
 		if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
 			SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
 				    AES_BLOCK_SIZE, areq_ctx->hkey);
@@ -1365,7 +1369,9 @@ int ssi_buffer_mgr_map_aead_request(
 		}
 
 		areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
-			&areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE);
+								  &areq_ctx->gcm_len_block,
+								  AES_BLOCK_SIZE,
+								  DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
 			SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
 				    AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
@@ -1374,8 +1380,9 @@ int ssi_buffer_mgr_map_aead_request(
 		}
 
 		areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
-			areq_ctx->gcm_iv_inc1,
-			AES_BLOCK_SIZE, DMA_TO_DEVICE);
+								areq_ctx->gcm_iv_inc1,
+								AES_BLOCK_SIZE,
+								DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
 			SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK "
@@ -1387,8 +1394,9 @@ int ssi_buffer_mgr_map_aead_request(
 		}
 
 		areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
-			areq_ctx->gcm_iv_inc2,
-			AES_BLOCK_SIZE, DMA_TO_DEVICE);
+								areq_ctx->gcm_iv_inc2,
+								AES_BLOCK_SIZE,
+								DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
 			SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK "
-- 
2.11.0

* [PATCH 4/6] staging: ccree: Fix alignment issues in ssi_cipher.c
From: Simon Sandström @ 2017-07-18 20:03 UTC (permalink / raw)
  To: gilad; +Cc: devel, gregkh, driverdev-devel, linux-crypto, Simon Sandström

Fixes checkpatch.pl alignment warnings.

Signed-off-by: Simon Sandström <simon@nikanor.nu>
---
 drivers/staging/ccree/ssi_cipher.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
index bfe9b1ccbf37..aec7c1480336 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/ssi_cipher.c
@@ -203,7 +203,8 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
 
 	/* Map key buffer */
 	ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
-					     max_key_buf_size, DMA_TO_DEVICE);
+						  max_key_buf_size,
+						  DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
 		SSI_LOG_ERR("Mapping Key %u B at va=%pK for DMA failed\n",
 			    max_key_buf_size, ctx_p->user.key);
-- 
2.11.0

* [PATCH 5/6] staging: ccree: Fix alignment issues in ssi_ivgen.c
From: Simon Sandström @ 2017-07-18 20:03 UTC (permalink / raw)
  To: gilad; +Cc: gregkh, linux-crypto, driverdev-devel, devel, Simon Sandström

Fixes checkpatch.pl alignment warnings.

Signed-off-by: Simon Sandström <simon@nikanor.nu>
---
 drivers/staging/ccree/ssi_ivgen.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/staging/ccree/ssi_ivgen.c b/drivers/staging/ccree/ssi_ivgen.c
index f140dbc5195c..86364f81acab 100644
--- a/drivers/staging/ccree/ssi_ivgen.c
+++ b/drivers/staging/ccree/ssi_ivgen.c
@@ -202,7 +202,8 @@ int ssi_ivgen_init(struct ssi_drvdata *drvdata)
 
 	/* Allocate pool's header for intial enc. key/IV */
 	ivgen_ctx->pool_meta = dma_alloc_coherent(device, SSI_IVPOOL_META_SIZE,
-			&ivgen_ctx->pool_meta_dma, GFP_KERNEL);
+						  &ivgen_ctx->pool_meta_dma,
+						  GFP_KERNEL);
 	if (!ivgen_ctx->pool_meta) {
 		SSI_LOG_ERR("Not enough memory to allocate DMA of pool_meta "
 			   "(%u B)\n", SSI_IVPOOL_META_SIZE);
-- 
2.11.0

* [PATCH 6/6] staging: ccree: Fix alignment issues in ssi_request_mgr.c
From: Simon Sandström @ 2017-07-18 20:03 UTC (permalink / raw)
  To: gilad; +Cc: gregkh, linux-crypto, driverdev-devel, devel, Simon Sandström

Fixes checkpatch.pl alignment warnings.

Signed-off-by: Simon Sandström <simon@nikanor.nu>
---
 drivers/staging/ccree/ssi_request_mgr.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index 3f39150cda4f..2eda82f317d2 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -136,7 +136,9 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
 
 	/* Allocate DMA word for "dummy" completion descriptor use */
 	req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev,
-		sizeof(u32), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
+							sizeof(u32),
+							&req_mgr_h->dummy_comp_buff_dma,
+							GFP_KERNEL);
 	if (!req_mgr_h->dummy_comp_buff) {
 		SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped "
 			   "buffer\n", sizeof(u32));
-- 
2.11.0
