linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] md: dm-crypt: Add Inline Encryption support for dmcrypt
@ 2018-05-28 13:01 Ladvine D Almeida
  2018-05-28 16:32 ` Milan Broz
  0 siblings, 1 reply; 9+ messages in thread
From: Ladvine D Almeida @ 2018-05-28 13:01 UTC (permalink / raw)
  To: Alasdair Kergon <agk@redhat.com>; Mike Snitzer
  Cc: linux-kernel,
	Manjunath M Bettegowda <manjumb@synopsys.com>; Prabu
	Thangamuthu <prabut@synopsys.com>; Tejas Joglekar


This patch adds new option
    -- perform_inline_encrypt
that sets the option inside dmcrypt to use inline encryption for
the configured mapping. I want to introduce inline encryption support
in the UFS Host Controller driver. The usage of the same for accomplishing
the Full Disk Encryption is done by the following changes in the
dmcrypt subsystem:
    - New function crypt_inline_encrypt_submit() is added to the
dmcrypt which associate the crypto context to the bios which
are submitted by the subsystem.
    - Successful configuration for the inline encryption will result in
the crypto transformation job being bypassed in the dmcrypt layer and the
actual encryption happens inline to the block device.

Another patch set is sent to the block layer community for
CONFIG_BLK_DEV_INLINE_ENCRYPTION config, which enables changes in the
block layer for adding the bi_ie_private variable to the bio structure.

Signed-off-by: Ladvine D Almeida <ladvine@synopsys.com>
---
 drivers/md/dm-crypt.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 53 insertions(+), 2 deletions(-)

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 44ff473..a9ed567 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -39,6 +39,7 @@
 #include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "crypt"
+#define REQ_INLINE_ENCRYPTION REQ_DRV
 
 /*
  * context holding the current state of a multi-part conversion
@@ -125,7 +126,8 @@ struct iv_tcw_private {
  * and encrypts / decrypts at the same time.
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
-	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
+	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
+		 DM_CRYPT_INLINE_ENCRYPT };
 
 enum cipher_flags {
 	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cihper */
@@ -215,6 +217,10 @@ struct crypt_config {
 
 	u8 *authenc_key; /* space for keys in authenc() format (if used) */
 	u8 key[0];
+#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
+	void *ie_private; /* crypto context for inline enc drivers */
+	int key_cfg_idx;  /* key configuration index for inline enc */
+#endif
 };
 
 #define MIN_IOS		64
@@ -1470,6 +1476,20 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
 	atomic_set(&io->io_pending, 0);
 }
 
+#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
+static void crypt_inline_encrypt_submit(struct crypt_config *cc,
+	struct dm_target *ti, struct bio *bio)
+{
+	bio_set_dev(bio, cc->dev->bdev);
+	if (bio_sectors(bio))
+		bio->bi_iter.bi_sector = cc->start +
+			dm_target_offset(ti, bio->bi_iter.bi_sector);
+	bio->bi_opf |= REQ_INLINE_ENCRYPTION;
+	bio->bi_ie_private = cc->ie_private;
+	generic_make_request(bio);
+}
+#endif
+
 static void crypt_inc_pending(struct dm_crypt_io *io)
 {
 	atomic_inc(&io->io_pending);
@@ -1960,6 +1980,9 @@ static int crypt_setkey(struct crypt_config *cc)
 
 	/* Ignore extra keys (which are used for IV etc) */
 	subkey_size = crypt_subkey_size(cc);
+#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
+	cc->key_cfg_idx = -1;
+#endif
 
 	if (crypt_integrity_hmac(cc)) {
 		if (subkey_size < cc->key_mac_size)
@@ -1978,10 +2001,19 @@ static int crypt_setkey(struct crypt_config *cc)
 			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
 					       cc->key + (i * subkey_size),
 					       subkey_size);
-		else
+		else {
 			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
 						   cc->key + (i * subkey_size),
 						   subkey_size);
+#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
+			if (r > 0) {
+				cc->key_cfg_idx = r;
+				cc->ie_private = cc->cipher_tfm.tfms[i];
+				r = 0;
+			}
+#endif
+		}
+
 		if (r)
 			err = r;
 	}
@@ -2654,6 +2686,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
 			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
 		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
 			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
+#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
+		else if (!strcasecmp(opt_string, "perform_inline_encrypt"))
+			set_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags);
+#endif
 		else {
 			ti->error = "Invalid feature arguments";
 			return -EINVAL;
@@ -2892,6 +2928,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
 		return DM_MAPIO_KILL;
 
+#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
+	if (cc->key_cfg_idx > 0) {
+		crypt_inline_encrypt_submit(cc, ti, bio);
+		return DM_MAPIO_SUBMITTED;
+	}
+#endif
+
 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 
@@ -2954,6 +2997,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
 		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
 		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
+#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
+		num_feature_args +=
+			test_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags);
+#endif
 		if (cc->on_disk_tag_size)
 			num_feature_args++;
 		if (num_feature_args) {
@@ -2970,6 +3017,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 				DMEMIT(" sector_size:%d", cc->sector_size);
 			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
 				DMEMIT(" iv_large_sectors");
+#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
+			if (test_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags))
+				DMEMIT(" perform_inline_encrypt");
+#endif
 		}
 
 		break;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH] md: dm-crypt: Add Inline Encryption support for dmcrypt
  2018-05-28 13:01 [PATCH] md: dm-crypt: Add Inline Encryption support for dmcrypt Ladvine D Almeida
@ 2018-05-28 16:32 ` Milan Broz
  2018-05-30 14:52   ` Ladvine D Almeida
  0 siblings, 1 reply; 9+ messages in thread
From: Milan Broz @ 2018-05-28 16:32 UTC (permalink / raw)
  To: Ladvine D Almeida, Alasdair Kergon, Mike Snitzer
  Cc: linux-kernel, Manjunath M Bettegowda, Prabu Thangamuthu,
	Tejas Joglekar, device-mapper development

On 05/28/2018 03:01 PM, Ladvine D Almeida wrote:
> 
> This patch adds new option
>     -- perform_inline_encrypt
> that set the option inside dmcrypt to use inline encryption for
> the configured mapping. I want to introduce inline encryption support
> in the UFS Host Controller driver. The usage of the same for accomplishing
> the Full Disk Encryption is done by the following changes in the
> dmcrypt subsystem:
>     - New function crypt_inline_encrypt_submit() is added to the
> dmcrypt which associate the crypto context to the bios which
> are submitted by the subsystem.
>     - Successful configuration for the inline encryption will result in
> the crypto transformation job being bypassed in the dmcrypt layer and the
> actual encryption happens inline to the block device.

I am not sure this is a good idea. Dm-crypt should never ever forward
plaintext sectors to the underlying device.

And you do not even check that your hw is available in the underlying device!

If you want to use dm-crypt, write a crypto API driver for your crypto hw that defines
specific cipher and let dm-crypt use this cipher (no patches needed in this case!)

If I read the patch correctly, you do not check any parameters for
compatibility with your hw support (cipher, mode, IV algorithm, key length, sector size ...)

It seems like if the "perform_inline_encrypt" option is present, you just submit
the whole bio to your code with new INLINE_ENCRYPTION bit set.

What happens, if the cipher in table is different from what your hw is doing?

Milan

> Another patch set is sent to the block layer community for
> CONFIG_BLK_DEV_INLINE_ENCRYPTION config, which enables changes in the
> block layer for adding the bi_ie_private variable to the bio structure.
> 
> Signed-off-by: Ladvine D Almeida <ladvine@synopsys.com>
> ---
>  drivers/md/dm-crypt.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 53 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
> index 44ff473..a9ed567 100644
> --- a/drivers/md/dm-crypt.c
> +++ b/drivers/md/dm-crypt.c
> @@ -39,6 +39,7 @@
>  #include <linux/device-mapper.h>
>  
>  #define DM_MSG_PREFIX "crypt"
> +#define REQ_INLINE_ENCRYPTION REQ_DRV
>  
>  /*
>   * context holding the current state of a multi-part conversion
> @@ -125,7 +126,8 @@ struct iv_tcw_private {
>   * and encrypts / decrypts at the same time.
>   */
>  enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
> -	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
> +	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
> +		 DM_CRYPT_INLINE_ENCRYPT };
>  
>  enum cipher_flags {
>  	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cihper */
> @@ -215,6 +217,10 @@ struct crypt_config {
>  
>  	u8 *authenc_key; /* space for keys in authenc() format (if used) */
>  	u8 key[0];
> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
> +	void *ie_private; /* crypto context for inline enc drivers */
> +	int key_cfg_idx;  /* key configuration index for inline enc */
> +#endif
>  };
>  
>  #define MIN_IOS		64
> @@ -1470,6 +1476,20 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
>  	atomic_set(&io->io_pending, 0);
>  }
>  
> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
> +static void crypt_inline_encrypt_submit(struct crypt_config *cc,
> +	struct dm_target *ti, struct bio *bio)
> +{
> +	bio_set_dev(bio, cc->dev->bdev);
> +	if (bio_sectors(bio))
> +		bio->bi_iter.bi_sector = cc->start +
> +			dm_target_offset(ti, bio->bi_iter.bi_sector);
> +	bio->bi_opf |= REQ_INLINE_ENCRYPTION;
> +	bio->bi_ie_private = cc->ie_private;
> +	generic_make_request(bio);
> +}
> +#endif
> +
>  static void crypt_inc_pending(struct dm_crypt_io *io)
>  {
>  	atomic_inc(&io->io_pending);
> @@ -1960,6 +1980,9 @@ static int crypt_setkey(struct crypt_config *cc)
>  
>  	/* Ignore extra keys (which are used for IV etc) */
>  	subkey_size = crypt_subkey_size(cc);
> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
> +	cc->key_cfg_idx = -1;
> +#endif
>  
>  	if (crypt_integrity_hmac(cc)) {
>  		if (subkey_size < cc->key_mac_size)
> @@ -1978,10 +2001,19 @@ static int crypt_setkey(struct crypt_config *cc)
>  			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
>  					       cc->key + (i * subkey_size),
>  					       subkey_size);
> -		else
> +		else {
>  			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
>  						   cc->key + (i * subkey_size),
>  						   subkey_size);
> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
> +			if (r > 0) {
> +				cc->key_cfg_idx = r;
> +				cc->ie_private = cc->cipher_tfm.tfms[i];
> +				r = 0;
> +			}
> +#endif
> +		}
> +
>  		if (r)
>  			err = r;
>  	}
> @@ -2654,6 +2686,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
>  			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
>  		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
>  			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
> +		else if (!strcasecmp(opt_string, "perform_inline_encrypt"))
> +			set_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags);
> +#endif
>  		else {
>  			ti->error = "Invalid feature arguments";
>  			return -EINVAL;
> @@ -2892,6 +2928,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
>  	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
>  		return DM_MAPIO_KILL;
>  
> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
> +	if (cc->key_cfg_idx > 0) {
> +		crypt_inline_encrypt_submit(cc, ti, bio);
> +		return DM_MAPIO_SUBMITTED;
> +	}
> +#endif
> +
>  	io = dm_per_bio_data(bio, cc->per_bio_data_size);
>  	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
>  
> @@ -2954,6 +2997,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
>  		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
>  		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
>  		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
> +		num_feature_args +=
> +			test_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags);
> +#endif
>  		if (cc->on_disk_tag_size)
>  			num_feature_args++;
>  		if (num_feature_args) {
> @@ -2970,6 +3017,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
>  				DMEMIT(" sector_size:%d", cc->sector_size);
>  			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
>  				DMEMIT(" iv_large_sectors");
> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
> +			if (test_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags))
> +				DMEMIT(" perform_inline_encrypt");
> +#endif
>  		}
>  
>  		break;
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] md: dm-crypt: Add Inline Encryption support for dmcrypt
  2018-05-28 16:32 ` Milan Broz
@ 2018-05-30 14:52   ` Ladvine D Almeida
  2018-06-01  7:52     ` Milan Broz
                       ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Ladvine D Almeida @ 2018-05-30 14:52 UTC (permalink / raw)
  To: Milan Broz, Ladvine D Almeida, Alasdair Kergon, Mike Snitzer
  Cc: linux-kernel, Manjunath M Bettegowda, Prabu Thangamuthu,
	Tejas Joglekar, device-mapper development, Joao Pinto

On Monday 28 May 2018 05:33 PM, Milan Broz wrote:
> On 05/28/2018 03:01 PM, Ladvine D Almeida wrote:
>> This patch adds new option
>>     -- perform_inline_encrypt
>> that set the option inside dmcrypt to use inline encryption for
>> the configured mapping. I want to introduce inline encryption support
>> in the UFS Host Controller driver. The usage of the same for accomplishing
>> the Full Disk Encryption is done by the following changes in the
>> dmcrypt subsystem:
>>     - New function crypt_inline_encrypt_submit() is added to the
>> dmcrypt which associate the crypto context to the bios which
>> are submitted by the subsystem.
>>     - Successful configuration for the inline encryption will result in
>> the crypto transformation job being bypassed in the dmcrypt layer and the
>> actual encryption happens inline to the block device.
> I am not sure this is a good idea. Dm-crypt should never ever forward
> plaintext sector to underlying device.
>
> And you do not even check that your hw is available in the underlying device!
>
> If you want to use dm-crypt, write a crypto API driver for your crypto hw that defines
> specific cipher and let dm-crypt use this cipher (no patches needed in this case!)

Thanks for sharing your review.
I am sharing the links for the patches which are related to inline encryption below:
https://lkml.org/lkml/2018/5/28/1153
https://lkml.org/lkml/2018/5/28/1196
https://lkml.org/lkml/2018/5/28/1158
https://lkml.org/lkml/2018/5/28/1173
https://lkml.org/lkml/2018/5/28/1178

We have a crypto API implementation for the hardware for the XTS algorithm, which will get registered when
the XTS algorithm capability of the inline encryption engine inside the UFS Host Controller is detected by the UFS HC
driver. dm-crypt will be using this registered cipher.
The dm-crypt patch is unavoidable because the encrypt/decrypt function cannot perform the transformation
when an inline encryption engine is involved. It also demands forwarding the plaintext sectors to the underlying
block device driver, and the crypto transformation happens internally in the controller when the data transfer happens.

>
> If I read the patch correctly, you do not check any parameters for
> compatibility with your hw support (cipher, mode, IV algorithm, key length, sector size ...)

I am registering an algorithm with cipher mode, IV size, block size, supported key size etc. for use by dm-crypt
as per the hardware capability of inline encryption engine.
If any other cipher mode, etc is used during the setup stage, DM-Crypt will work as normal.

>
> It seems like if the "perform_inline_encrypt" option is present, you just submit
> the whole bio to your code with new INLINE_ENCRYPTION bit set.

when the optional argument "perform_inline_encrypt" is set, we are not unconditionally sending the bio
to the block devices. The steps are explained below:
1. user invokes the dm-setup command with the registered cipher "xts" and with the optional argument
"perform_inline_encrypt".
2. dm-setup invokes the setkey function of the newly introduced algorithm, which finds the available key slots
to be programmed(UFS Host controller Inline Encryption engine has multiple keyslots), program the key slot,
and return the key slot index as return value of the set key function.
3. When read/write operation happens, crypt_map() function in dm-crypt validates whether there is associated
key configuration index for the request. The Bio will be submitted directly in this case only with the associated
crypto context.
4. Block device driver, eg. UFS host controller driver will create the Transfer requests as per this crypto context and
encryption happens inside the controller.

>
> What happens, if the cipher in table is different from what your hw is doing?

In this case, the dm-crypt will work as previous. This is because the setkey returns 0.
whenever there is key configuration index associated, setkey returns index value(greater than 0). The bios are submitted
with that information to underlying block device drivers.
Also, care is taken to ensure that fallback will happen in case the hardware lacks support for any key lengths.

Appreciate your suggestions/feedback. We are trying to bring modifications into the subsystem to support controllers with
inline encryption capabilities and tried our best to take care of any vulnerabilities or risks associated to same.
Inline encryption engines got huge advantage over the accelerators/software algorithms that it removes overhead associated
to current implementation like performing transformation on 512 byte chunks, allocation of scatterlists etc.

>
> Milan
>
>> Another patch set is sent to the block layer community for
>> CONFIG_BLK_DEV_INLINE_ENCRYPTION config, which enables changes in the
>> block layer for adding the bi_ie_private variable to the bio structure.
>>
>> Signed-off-by: Ladvine D Almeida <ladvine@synopsys.com>
>> ---
>>  drivers/md/dm-crypt.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++--
>>  1 file changed, 53 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
>> index 44ff473..a9ed567 100644
>> --- a/drivers/md/dm-crypt.c
>> +++ b/drivers/md/dm-crypt.c
>> @@ -39,6 +39,7 @@
>>  #include <linux/device-mapper.h>
>>  
>>  #define DM_MSG_PREFIX "crypt"
>> +#define REQ_INLINE_ENCRYPTION REQ_DRV
>>  
>>  /*
>>   * context holding the current state of a multi-part conversion
>> @@ -125,7 +126,8 @@ struct iv_tcw_private {
>>   * and encrypts / decrypts at the same time.
>>   */
>>  enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
>> -	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
>> +	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
>> +		 DM_CRYPT_INLINE_ENCRYPT };
>>  
>>  enum cipher_flags {
>>  	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cihper */
>> @@ -215,6 +217,10 @@ struct crypt_config {
>>  
>>  	u8 *authenc_key; /* space for keys in authenc() format (if used) */
>>  	u8 key[0];
>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>> +	void *ie_private; /* crypto context for inline enc drivers */
>> +	int key_cfg_idx;  /* key configuration index for inline enc */
>> +#endif
>>  };
>>  
>>  #define MIN_IOS		64
>> @@ -1470,6 +1476,20 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
>>  	atomic_set(&io->io_pending, 0);
>>  }
>>  
>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>> +static void crypt_inline_encrypt_submit(struct crypt_config *cc,
>> +	struct dm_target *ti, struct bio *bio)
>> +{
>> +	bio_set_dev(bio, cc->dev->bdev);
>> +	if (bio_sectors(bio))
>> +		bio->bi_iter.bi_sector = cc->start +
>> +			dm_target_offset(ti, bio->bi_iter.bi_sector);
>> +	bio->bi_opf |= REQ_INLINE_ENCRYPTION;
>> +	bio->bi_ie_private = cc->ie_private;
>> +	generic_make_request(bio);
>> +}
>> +#endif
>> +
>>  static void crypt_inc_pending(struct dm_crypt_io *io)
>>  {
>>  	atomic_inc(&io->io_pending);
>> @@ -1960,6 +1980,9 @@ static int crypt_setkey(struct crypt_config *cc)
>>  
>>  	/* Ignore extra keys (which are used for IV etc) */
>>  	subkey_size = crypt_subkey_size(cc);
>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>> +	cc->key_cfg_idx = -1;
>> +#endif
>>  
>>  	if (crypt_integrity_hmac(cc)) {
>>  		if (subkey_size < cc->key_mac_size)
>> @@ -1978,10 +2001,19 @@ static int crypt_setkey(struct crypt_config *cc)
>>  			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
>>  					       cc->key + (i * subkey_size),
>>  					       subkey_size);
>> -		else
>> +		else {
>>  			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
>>  						   cc->key + (i * subkey_size),
>>  						   subkey_size);
>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>> +			if (r > 0) {
>> +				cc->key_cfg_idx = r;
>> +				cc->ie_private = cc->cipher_tfm.tfms[i];
>> +				r = 0;
>> +			}
>> +#endif
>> +		}
>> +
>>  		if (r)
>>  			err = r;
>>  	}
>> @@ -2654,6 +2686,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
>>  			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
>>  		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
>>  			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>> +		else if (!strcasecmp(opt_string, "perform_inline_encrypt"))
>> +			set_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags);
>> +#endif
>>  		else {
>>  			ti->error = "Invalid feature arguments";
>>  			return -EINVAL;
>> @@ -2892,6 +2928,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
>>  	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
>>  		return DM_MAPIO_KILL;
>>  
>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>> +	if (cc->key_cfg_idx > 0) {
>> +		crypt_inline_encrypt_submit(cc, ti, bio);
>> +		return DM_MAPIO_SUBMITTED;
>> +	}
>> +#endif
>> +
>>  	io = dm_per_bio_data(bio, cc->per_bio_data_size);
>>  	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
>>  
>> @@ -2954,6 +2997,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
>>  		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
>>  		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
>>  		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>> +		num_feature_args +=
>> +			test_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags);
>> +#endif
>>  		if (cc->on_disk_tag_size)
>>  			num_feature_args++;
>>  		if (num_feature_args) {
>> @@ -2970,6 +3017,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
>>  				DMEMIT(" sector_size:%d", cc->sector_size);
>>  			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
>>  				DMEMIT(" iv_large_sectors");
>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>> +			if (test_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags))
>> +				DMEMIT(" perform_inline_encrypt");
>> +#endif
>>  		}
>>  
>>  		break;
>>
>

Best Regards,

Ladvine

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] md: dm-crypt: Add Inline Encryption support for dmcrypt
  2018-05-30 14:52   ` Ladvine D Almeida
@ 2018-06-01  7:52     ` Milan Broz
  2018-06-01  8:16     ` Christoph Hellwig
  2018-06-01 14:46     ` [dm-devel] " Eric Biggers
  2 siblings, 0 replies; 9+ messages in thread
From: Milan Broz @ 2018-06-01  7:52 UTC (permalink / raw)
  To: Ladvine D Almeida, Alasdair Kergon, Mike Snitzer
  Cc: linux-kernel, Manjunath M Bettegowda, Prabu Thangamuthu,
	Tejas Joglekar, device-mapper development, Joao Pinto

On 05/30/2018 04:52 PM, Ladvine D Almeida wrote:
 
> we have crypto API implementation for the hardware for  XTS algorithm, which will get registered when
> the XTS algorithm capability of the inline encryption engine inside UFS Host Controller get detected by the UFS HC
> driver. dm-crypt will be using this registered cipher.
>  dm-crypt patch is unavoidable because the encrypt/decrypt function cannot perform the transformation
> when inline encryption engine is involved. Also, it demands forwarding the plaintext sectors to the underlying
> block device driver and  the crypto transformation happens internally in controller when data transfer happens.

I understand you want to utilize hardware this way, but this is a very dangerous abuse of dm-crypt,
which is designed to be software-based FDE (hw acceleration should be on the crypto API layer, which decides where to encrypt data).

Either you should implement crypto accelerator driver, that will work with dm-crypt through crypto API,
or you can implement own crypto block layer driver or a new device-mapper target (very similar to linear one)
that will just add key reference to bio for inline encryption.

>> If I read the patch correctly, you do not check any parameters for
>> compatibility with your hw support (cipher, mode, IV algorithm, key length, sector size ...)
> 
> I am registering an algorithm with cipher mode, IV size, block size, supported key size etc. for use by dm-crypt
> as per the hardware capability of inline encryption engine.
> If any other cipher mode, etc is used during the setup stage, DM-Crypt will work as normal.
> 
>>
>> It seems like if the "perform_inline_encrypt" option is present, you just submit
>> the whole bio to your code with new INLINE_ENCRYPTION bit set.
> 
> when the optional argument "perform_inline_encrypt" is set, we are not unconditionally sending the bio
> to the block devices. The steps are explained below:
> 1. user invokes the dm-setup command with the registered cipher "xts" and with the optional argument
> "perform_inline_encrypt".
> 2. dm-setup invokes the setkey function of the newly introduced algorithm, which finds the available key slots
> to be programmed(UFS Host controller Inline Encryption engine has multiple keyslots), program the key slot,
> and return the key slot index as return value of the set key function.
> 3. When read/write operation happens, crypt_map() function in dm-crypt validates whether there is associated
> key configuration index for the request. The Bio will be submitted directly in this case only with the associated
> crypto context.
> 4. Block device driver, eg. UFS host controller driver will create the Transfer requests as per this crypto context and
> encryption happens inside the controller.
>>
>> What happens, if the cipher in table is different from what your hw is doing?
> 
> In this case, the dm-crypt will work as previous. This is because the setkey returns 0.
> whenever there is key configuration index associated, setkey returns index value(greater than 0). The bios are submitted
> with that information to underlying block device drivers.
> Also, care is taken to ensure that fallback will happen incase hardware lacks the support of any key lengths.

What I see in the patch is:

You set the key through crypto_skcipher_setkey() for a crypto API tfm (whatever driver it uses);
it seems that you expect that this function returns your key offset in hw, and this is the only
check I see there.

According to crypto/skcipher.h, the positive return value is not defined for crypto_skcipher_setkey()
"* Return: 0 if the setting of the key was successful; < 0 if an error occurred"

If it is undocumented extension of crypto API, then ANY driver, that returns positive integer
here causes that dmcrypt will bypass encryption in map function later.


Another question - how do you handle different IV generators (that are currently implemented inline in dmcrypt)?
What if I want to use (for some reason) an IV generator other than the plain sector number with XTS (aes-xts-essiv:sha256 for example)?

Where do you check that inline encryption will use properly generated IV for sector?
Ditto for dm-crypt sector_size and some other parameters (we can use 4k encryption sector over device
that announces 512 hw sectors).

> Appreciate your suggestions/feedback. We are trying to bring modifications into the subsystem to support controllers with
> inline encryption capabilities and tried our best to take care of any vulnerabilities or risks associated to same.
> Inline encryption engines got huge advantage over the accelerators/software algorithms that it removes overhead associated
> to current implementation like performing transformation on 512 byte chunks, allocation of scatterlists etc.

Well, it is up to Mike if he accepts such extensions, but for me this is not the correct way.

I would suggest you to implement a new device-mapper target for inline encryption (it should be quite straightforward,
basically just key setting extension to linear target?).
All this logic seems to be just a wrapper for some preconfigured hw block device with encryption.

All dm-crypt mappings should be configurable using the cryptsetup tool (not only dmsetup);
your solution is hw dependent and is therefore unsupportable in cryptsetup.

We can support in the future some key wrapping schemes for HSM (like paes on S390), but the interface to dm-crypt must
be hw independent.

Also the extension of bio struct is perhaps not acceptable, this is why there is bio cloning interface and bi_private
(I think Jens already mentioned it in reply for another patch from series).

Milan


>>> Another patch set is sent to the block layer community for
>>> CONFIG_BLK_DEV_INLINE_ENCRYPTION config, which enables changes in the
>>> block layer for adding the bi_ie_private variable to the bio structure.
>>>
>>> Signed-off-by: Ladvine D Almeida <ladvine@synopsys.com>
>>> ---
>>>  drivers/md/dm-crypt.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++--
>>>  1 file changed, 53 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
>>> index 44ff473..a9ed567 100644
>>> --- a/drivers/md/dm-crypt.c
>>> +++ b/drivers/md/dm-crypt.c
>>> @@ -39,6 +39,7 @@
>>>  #include <linux/device-mapper.h>
>>>  
>>>  #define DM_MSG_PREFIX "crypt"
>>> +#define REQ_INLINE_ENCRYPTION REQ_DRV
>>>  
>>>  /*
>>>   * context holding the current state of a multi-part conversion
>>> @@ -125,7 +126,8 @@ struct iv_tcw_private {
>>>   * and encrypts / decrypts at the same time.
>>>   */
>>>  enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
>>> -	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
>>> +	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
>>> +		 DM_CRYPT_INLINE_ENCRYPT };
>>>  
>>>  enum cipher_flags {
>>>  	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cihper */
>>> @@ -215,6 +217,10 @@ struct crypt_config {
>>>  
>>>  	u8 *authenc_key; /* space for keys in authenc() format (if used) */
>>>  	u8 key[0];
>>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>>> +	void *ie_private; /* crypto context for inline enc drivers */
>>> +	int key_cfg_idx;  /* key configuration index for inline enc */
>>> +#endif
>>>  };
>>>  
>>>  #define MIN_IOS		64
>>> @@ -1470,6 +1476,20 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
>>>  	atomic_set(&io->io_pending, 0);
>>>  }
>>>  
>>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>>> +static void crypt_inline_encrypt_submit(struct crypt_config *cc,
>>> +	struct dm_target *ti, struct bio *bio)
>>> +{
>>> +	bio_set_dev(bio, cc->dev->bdev);
>>> +	if (bio_sectors(bio))
>>> +		bio->bi_iter.bi_sector = cc->start +
>>> +			dm_target_offset(ti, bio->bi_iter.bi_sector);
>>> +	bio->bi_opf |= REQ_INLINE_ENCRYPTION;
>>> +	bio->bi_ie_private = cc->ie_private;
>>> +	generic_make_request(bio);
>>> +}
>>> +#endif
>>> +
>>>  static void crypt_inc_pending(struct dm_crypt_io *io)
>>>  {
>>>  	atomic_inc(&io->io_pending);
>>> @@ -1960,6 +1980,9 @@ static int crypt_setkey(struct crypt_config *cc)
>>>  
>>>  	/* Ignore extra keys (which are used for IV etc) */
>>>  	subkey_size = crypt_subkey_size(cc);
>>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>>> +	cc->key_cfg_idx = -1;
>>> +#endif
>>>  
>>>  	if (crypt_integrity_hmac(cc)) {
>>>  		if (subkey_size < cc->key_mac_size)
>>> @@ -1978,10 +2001,19 @@ static int crypt_setkey(struct crypt_config *cc)
>>>  			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
>>>  					       cc->key + (i * subkey_size),
>>>  					       subkey_size);
>>> -		else
>>> +		else {
>>>  			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
>>>  						   cc->key + (i * subkey_size),
>>>  						   subkey_size);
>>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>>> +			if (r > 0) {
>>> +				cc->key_cfg_idx = r;
>>> +				cc->ie_private = cc->cipher_tfm.tfms[i];
>>> +				r = 0;
>>> +			}
>>> +#endif
>>> +		}
>>> +
>>>  		if (r)
>>>  			err = r;
>>>  	}
>>> @@ -2654,6 +2686,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
>>>  			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
>>>  		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
>>>  			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
>>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>>> +		else if (!strcasecmp(opt_string, "perform_inline_encrypt"))
>>> +			set_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags);
>>> +#endif
>>>  		else {
>>>  			ti->error = "Invalid feature arguments";
>>>  			return -EINVAL;
>>> @@ -2892,6 +2928,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
>>>  	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
>>>  		return DM_MAPIO_KILL;
>>>  
>>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>>> +	if (cc->key_cfg_idx > 0) {
>>> +		crypt_inline_encrypt_submit(cc, ti, bio);
>>> +		return DM_MAPIO_SUBMITTED;
>>> +	}
>>> +#endif
>>> +
>>>  	io = dm_per_bio_data(bio, cc->per_bio_data_size);
>>>  	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
>>>  
>>> @@ -2954,6 +2997,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
>>>  		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
>>>  		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
>>>  		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
>>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>>> +		num_feature_args +=
>>> +			test_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags);
>>> +#endif
>>>  		if (cc->on_disk_tag_size)
>>>  			num_feature_args++;
>>>  		if (num_feature_args) {
>>> @@ -2970,6 +3017,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
>>>  				DMEMIT(" sector_size:%d", cc->sector_size);
>>>  			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
>>>  				DMEMIT(" iv_large_sectors");
>>> +#ifdef CONFIG_BLK_DEV_INLINE_ENCRYPTION
>>> +			if (test_bit(DM_CRYPT_INLINE_ENCRYPT, &cc->flags))
>>> +				DMEMIT(" perform_inline_encrypt");
>>> +#endif
>>>  		}
>>>  
>>>  		break;
>>>
>>
> 
> Best Regards,
> 
> Ladvine
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] md: dm-crypt: Add Inline Encryption support for dmcrypt
  2018-05-30 14:52   ` Ladvine D Almeida
  2018-06-01  7:52     ` Milan Broz
@ 2018-06-01  8:16     ` Christoph Hellwig
  2018-06-06 10:04       ` Ladvine D Almeida
  2018-06-01 14:46     ` [dm-devel] " Eric Biggers
  2 siblings, 1 reply; 9+ messages in thread
From: Christoph Hellwig @ 2018-06-01  8:16 UTC (permalink / raw)
  To: Ladvine D Almeida
  Cc: Milan Broz, Alasdair Kergon, Mike Snitzer, linux-kernel,
	Manjunath M Bettegowda, Prabu Thangamuthu, Tejas Joglekar,
	device-mapper development, Joao Pinto

On Wed, May 30, 2018 at 02:52:07PM +0000, Ladvine D Almeida wrote:
> when the optional argument "perform_inline_encrypt" is set, we are not unconditionally sending the bio
> to the block devices. The steps are explained below:
> 1. user invokes the dm-setup command with the registered cipher "xts" and with the optional argument
> "perform_inline_encrypt".
> 2. dm-setup invokes the setkey function of the newly introduced algorithm, which finds the available key slots
> to be programmed(UFS Host controller Inline Encryption engine has multiple keyslots), program the key slot,
> and return the key slot index as return value of the set key function.
> 3. When read/write operation happens, crypt_map() function in dm-crypt validates whether there is associated
> key configuration index for the request. The Bio will be submitted directly in this case only with the associated
> crypto context.
> 4. Block device driver, eg. UFS host controller driver will create the Transfer requests as per this crypto context and
> encryption happens inside the controller.

Why isn't this all controlled by the ufs drivers, using helpers as
required?

Also why do we even need this API over just implementing TCG
Opal/Opalite on the device?

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dm-devel] [PATCH] md: dm-crypt: Add Inline Encryption support for dmcrypt
  2018-05-30 14:52   ` Ladvine D Almeida
  2018-06-01  7:52     ` Milan Broz
  2018-06-01  8:16     ` Christoph Hellwig
@ 2018-06-01 14:46     ` Eric Biggers
  2018-06-04 13:15       ` Ladvine D Almeida
  2 siblings, 1 reply; 9+ messages in thread
From: Eric Biggers @ 2018-06-01 14:46 UTC (permalink / raw)
  To: Ladvine D Almeida
  Cc: Milan Broz, Alasdair Kergon, Mike Snitzer, Joao Pinto,
	linux-kernel, device-mapper development, Manjunath M Bettegowda,
	Tejas Joglekar, Prabu Thangamuthu

Hi Ladvine,

On Wed, May 30, 2018 at 02:52:07PM +0000, Ladvine D Almeida wrote:
> On Monday 28 May 2018 05:33 PM, Milan Broz wrote:
> > On 05/28/2018 03:01 PM, Ladvine D Almeida wrote:
> >> This patch adds new option
> >>     -- perform_inline_encrypt
> >> that set the option inside dmcrypt to use inline encryption for
> >> the configured mapping. I want to introduce inline encryption support
> >> in the UFS Host Controller driver. The usage of the same for accomplishing
> >> the Full Disk Encryption is done by the following changes in the
> >> dmcrypt subsystem:
> >>     - New function crypt_inline_encrypt_submit() is added to the
> >> dmcrypt which associate the crypto context to the bios which
> >> are submitted by the subsystem.
> >>     - Successful configuration for the inline encryption will result in
> >> the crypto transformation job being bypassed in the dmcrypt layer and the
> >> actual encryption happens inline to the block device.
> > I am not sure this is a good idea. Dm-crypt should never ever forward
> > plaintext sector to underlying device.
> >
> > And you do not even check that your hw is available in the underlying device!
> >
> > If you want to use dm-crypt, write a crypto API driver for your crypto hw that defines
> > specific cipher and let dm-crypt use this cipher (no patches needed in this case!)
> 
> Thanks for sharing your review.
> I am sharing the links for the patches which are related to inline encryption below:
> https://lkml.org/lkml/2018/5/28/1153
> https://lkml.org/lkml/2018/5/28/1196
> https://lkml.org/lkml/2018/5/28/1158
> https://lkml.org/lkml/2018/5/28/1173
> https://lkml.org/lkml/2018/5/28/1178
> 
> we have crypto API implementation for the hardware for  XTS algorithm, which will get registered when
> the XTS algorithm capability of the inline encryption engine inside UFS Host Controller get detected by the UFS HC
> driver. dm-crypt will be using this registered cipher.
>  dm-crypt patch is unavoidable because the encrypt/decrypt function cannot perform the transformation
> when inline encryption engine is involved. Also, it demands forwarding the plaintext sectors to the underlying
> block device driver and  the crypto transformation happens internally in controller when data transfer happens.
> 

I appreciate that you're working on this, but can you please send the full
series to the relevant mailing lists, so that everyone has the needed context?
Single random patches for this are basically impossible to review.  OFC that
also means including a cover letter that explains the overall problem and your
solution.  In the Cc list I also recommend including the ext4 maintainer
Theodore Ts'o <tytso@mit.edu>, who led a discussion at LSFMM 2017 about this
topic (https://lwn.net/Articles/717754/), and the f2fs maintainer Jaegeuk Kim
<jaegeuk@kernel.org> who has had to deal with this stuff in an Android device
kernel, but for another vendor's inline encryption hardware.  I assume your
solution will work for other vendors too?  I think inline encryption is part of
the latest UFS specification now, so the latest hardware does it in a
"standardized" way and doesn't require ad-hoc vendor-specific stuff --- right?
And do you have any plans for inline encryption support with fscrypt (ext4/f2fs
encryption, a.k.a. what Android calls "File-Based Encryption")?  We need a
design for inline encryption in the upstream kernel that is sufficiently general
to work for all these vendors and use cases, not just specific ones.

Just at a quick glance, you also seem to be abusing the crypto API by exposing
inline encryption capabilities as a skcipher_alg.  With inline encryption by
definition you can't actually do encryption outside of a block device request;
as a result, your patch has to use a fallback algorithm to do the actual
encryption for the skcipher_alg, which makes no sense.  So it is not at all
clear to me that you should use the crypto API at all, as opposed to having it
be purely a block layer thing.  (Please also Cc linux-crypto if you are doing
significant work related to the crypto API.)

> >
> > If I read the patch correctly, you do not check any parameters for
> > compatibility with your hw support (cipher, mode, IV algorithm, key length, sector size ...)
> 
> I am registering an algorithm with cipher mode, IV size, block size, supported key size etc. for use by dm-crypt
> as per the hardware capability of inline encryption engine.
> If any other cipher mode, etc is used during the setup stage, DM-Crypt will work as normal.
> 
> >
> > It seems like if the "perform_inline_encrypt" option is present, you just submit
> > the whole bio to your code with new INLINE_ENCRYPTION bit set.
> 
> when the optional argument "perform_inline_encrypt" is set, we are not unconditionally sending the bio
> to the block devices. The steps are explained below:
> 1. user invokes the dm-setup command with the registered cipher "xts" and with the optional argument
> "perform_inline_encrypt".
> 2. dm-setup invokes the setkey function of the newly introduced algorithm, which finds the available key slots
> to be programmed(UFS Host controller Inline Encryption engine has multiple keyslots), program the key slot,
> and return the key slot index as return value of the set key function.
> 3. When read/write operation happens, crypt_map() function in dm-crypt validates whether there is associated
> key configuration index for the request. The Bio will be submitted directly in this case only with the associated
> crypto context.
> 4. Block device driver, eg. UFS host controller driver will create the Transfer requests as per this crypto context and
> encryption happens inside the controller.
> 
> >
> > What happens, if the cipher in table is different from what your hw is doing?
> 
> In this case, the dm-crypt will work as previous. This is because the setkey returns 0.
> whenever there is key configuration index associated, setkey returns index value(greater than 0). The bios are submitted
> with that information to underlying block device drivers.
> Also, care is taken to ensure that fallback will happen in case hardware lacks the support of any key lengths.
> 
> Appreciate your suggestions/feedback. We are trying to bring modifications into the subsystem to support controllers with
> inline encryption capabilities and tried our best to take care of any vulnerabilities or risks associated to same.
> Inline encryption engines got huge advantage over the accelerators/software algorithms that it removes overhead associated
> to current implementation like performing transformation on 512 byte chunks, allocation of scatterlists etc.

Thanks,

Eric

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dm-devel] [PATCH] md: dm-crypt: Add Inline Encryption support for dmcrypt
  2018-06-01 14:46     ` [dm-devel] " Eric Biggers
@ 2018-06-04 13:15       ` Ladvine D Almeida
  0 siblings, 0 replies; 9+ messages in thread
From: Ladvine D Almeida @ 2018-06-04 13:15 UTC (permalink / raw)
  To: Eric Biggers, Ladvine D Almeida
  Cc: Milan Broz, Alasdair Kergon, Mike Snitzer, Joao Pinto,
	linux-kernel, device-mapper development, Manjunath M Bettegowda,
	Tejas Joglekar, Prabu Thangamuthu, tytso, jaegeuk, linux-crypto,
	linux-block

On Friday 01 June 2018 03:46 PM, Eric Biggers wrote:
> Hi Ladvine,
>
> On Wed, May 30, 2018 at 02:52:07PM +0000, Ladvine D Almeida wrote:
>> On Monday 28 May 2018 05:33 PM, Milan Broz wrote:
>>> On 05/28/2018 03:01 PM, Ladvine D Almeida wrote:
>>>> This patch adds new option
>>>>     -- perform_inline_encrypt
>>>> that set the option inside dmcrypt to use inline encryption for
>>>> the configured mapping. I want to introduce inline encryption support
>>>> in the UFS Host Controller driver. The usage of the same for accomplishing
>>>> the Full Disk Encryption is done by the following changes in the
>>>> dmcrypt subsystem:
>>>>     - New function crypt_inline_encrypt_submit() is added to the
>>>> dmcrypt which associate the crypto context to the bios which
>>>> are submitted by the subsystem.
>>>>     - Successful configuration for the inline encryption will result in
>>>> the crypto transformation job being bypassed in the dmcrypt layer and the
>>>> actual encryption happens inline to the block device.
>>> I am not sure this is a good idea. Dm-crypt should never ever forward
>>> plaintext sector to underlying device.
>>>
>>> And you do not even check that your hw is available in the underlying device!
>>>
>>> If you want to use dm-crypt, write a crypto API driver for your crypto hw that defines
>>> specific cipher and let dm-crypt use this cipher (no patches needed in this case!)
>> Thanks for sharing your review.
>> I am sharing the links for the patches which are related to inline encryption below:
>> https://urldefense.proofpoint.com/v2/url?u=https-3A__lkml.org_lkml_2018_5_28_1153&d=DwIBAg&c=DPL6_X_6JkXFx7AXWqB0tg&r=z00zRD9ARrwHpe-XSl1OtUp1uNKGYoXI1G2DhOaDDBI&m=qwfogRmPFyDoEzG0-jvbwxKzFR6CEey4rqhZlWnAyGg&s=McNQrXeM_-QGjnBJCyJklJ7cBXfcGHLy-JcMFHMq1z0&e=
>> https://urldefense.proofpoint.com/v2/url?u=https-3A__lkml.org_lkml_2018_5_28_1196&d=DwIBAg&c=DPL6_X_6JkXFx7AXWqB0tg&r=z00zRD9ARrwHpe-XSl1OtUp1uNKGYoXI1G2DhOaDDBI&m=qwfogRmPFyDoEzG0-jvbwxKzFR6CEey4rqhZlWnAyGg&s=t11MogIToXI_6PCllWgf1C16-dSOqNqMIv_T2EK4INw&e=
>> https://urldefense.proofpoint.com/v2/url?u=https-3A__lkml.org_lkml_2018_5_28_1158&d=DwIBAg&c=DPL6_X_6JkXFx7AXWqB0tg&r=z00zRD9ARrwHpe-XSl1OtUp1uNKGYoXI1G2DhOaDDBI&m=qwfogRmPFyDoEzG0-jvbwxKzFR6CEey4rqhZlWnAyGg&s=28i-jGwv-oAAIrSLGhoPppWb8XY1m32JYPsssF-LvdU&e=
>> https://urldefense.proofpoint.com/v2/url?u=https-3A__lkml.org_lkml_2018_5_28_1173&d=DwIBAg&c=DPL6_X_6JkXFx7AXWqB0tg&r=z00zRD9ARrwHpe-XSl1OtUp1uNKGYoXI1G2DhOaDDBI&m=qwfogRmPFyDoEzG0-jvbwxKzFR6CEey4rqhZlWnAyGg&s=1KRdmOB67zrJQDYGEAUBvdxBtYI6cPrueQ1PH6Io8g0&e=
>> https://urldefense.proofpoint.com/v2/url?u=https-3A__lkml.org_lkml_2018_5_28_1178&d=DwIBAg&c=DPL6_X_6JkXFx7AXWqB0tg&r=z00zRD9ARrwHpe-XSl1OtUp1uNKGYoXI1G2DhOaDDBI&m=qwfogRmPFyDoEzG0-jvbwxKzFR6CEey4rqhZlWnAyGg&s=l4PRVW9Oo0QObWye_98AuxGSS3hLhdN5uMKCW-bJZkE&e=
>>
>> we have crypto API implementation for the hardware for  XTS algorithm, which will get registered when
>> the XTS algorithm capability of the inline encryption engine inside UFS Host Controller get detected by the UFS HC
>> driver. dm-crypt will be using this registered cipher.
>>  dm-crypt patch is unavoidable because the encrypt/decrypt function cannot perform the transformation
>> when inline encryption engine is involved. Also, it demands forwarding the plaintext sectors to the underlying
>> block device driver and  the crypto transformation happens internally in controller when data transfer happens.
>>
> I appreciate that you're working on this, but can you please send the full
> series to the relevant mailing lists, so that everyone has the needed context?
> Single random patches for this are basically impossible to review.  OFC that
> also means including a cover letter that explains the overall problem and your
> solution.  In the Cc list I also recommend including the ext4 maintainer
> Theodore Ts'o <tytso@mit.edu>, who led a discussion at LSFMM 2017 about this
> topic (https://urldefense.proofpoint.com/v2/url?u=https-3A__lwn.net_Articles_717754_&d=DwIBAg&c=DPL6_X_6JkXFx7AXWqB0tg&r=z00zRD9ARrwHpe-XSl1OtUp1uNKGYoXI1G2DhOaDDBI&m=qwfogRmPFyDoEzG0-jvbwxKzFR6CEey4rqhZlWnAyGg&s=x52JiuXr2DWZDm9hObhTwe-He-2waTLXmz9E1Tmz7Y0&e=), and the f2fs maintainer Jaegeuk Kim
> <jaegeuk@kernel.org> who has had to deal with this stuff in an Android device
> kernel, but for another vendor's inline encryption hardware.

Eric, Thanks for your suggestions. I will add the relevant persons in the cc list.

The patches were generated in the below manner:
For block level changes:
https://patchwork.kernel.org/patch/10432255/

For dmcrypt changes for Full Disk Encryption support:
https://lkml.org/lkml/2018/5/28/1027

UFS Host Controller driver changes, patch-series for Inline Encryption support:
https://lkml.org/lkml/2018/5/28/1187 -- cover letter
https://lkml.org/lkml/2018/5/28/1153
https://lkml.org/lkml/2018/5/28/1196
https://lkml.org/lkml/2018/5/28/1158
https://lkml.org/lkml/2018/5/28/1173
https://lkml.org/lkml/2018/5/28/1178


>   I assume your
> solution will work for other vendors too?  I think inline encryption is part of
> the latest UFS specification now, so the latest hardware does it in a
> "standardized" way and doesn't require ad-hoc vendor-specific stuff --- right?

Yes, correct. Implementation is done referring to Universal Flash Storage Host Controller Interface(UFSHCI)
document Version 3.0. ** There is no vendor-specific implementation**.
> And do you have any plans for inline encryption support with fscrypt (ext4/f2fs
> encryption, a.k.a. what Android calls "File-Based Encryption")?  We need a
> design for inline encryption in the upstream kernel that is sufficiently general
> to work for all these vendors and use cases, not just specific ones.

Support for the File Based Encryption requires modifications in the fs/crypto in the
similar line as modifications in dmcrypt. But, one challenging thing could be algorithm
implementation is agnostic about the block device with which it is used.
Registered algorithm can be used by filesystem with block device that doesn't have inline encryption
capability. How to identify the same and do a fallback in this case?

>
> Just at a quick glance, you also seem to be abusing the crypto API by exposing
> inline encryption capabilities as a skcipher_alg.  With inline encryption by
> definition you can't actually do encryption outside of a block device request;
> as a result, your patch has to use a fallback algorithm to do the actual
> encryption for the skcipher_alg, which makes no sense.  So it is not at all
> clear to me that you should use the crypto API at all, as opposed to having it
> be purely a block layer thing.  (Please also Cc linux-crypto if you are doing
> significant work related to the crypto API.)

The idea behind registering this way is to support fallback in case the hardware
misses the capability. eg:- when the inline encryption engine cannot perform
transformation using 384bits key for XTS algorithm, it could be accomplished now
using a fallback.
The normal flow for the inline encryption will not make use of the encrypt/decrypt
functions:
    - It use setkey function to identify the key configuration index(when the hardware
        support multiple key slots) and to program the key.
    - the dm-crypt layer bypasses calling the encrypt/decrypt function and passes reference
       to the crypto information in each bio that is submitted by it.
The intention behind implementation as crypto API is to stick to LKCF so that existing utilities
can be used.


>
>>> If I read the patch correctly, you do not check any parameters for
>>> compatibility with your hw support (cipher, mode, IV algorithm, key length, sector size ...)
>> I am registering an algorithm with cipher mode, IV size, block size, supported key size etc. for use by dm-crypt
>> as per the hardware capability of inline encryption engine.
>> If any other cipher mode, etc is used during the setup stage, DM-Crypt will work as normal.
>>
>>> It seems like if the "perform_inline_encrypt" option is present, you just submit
>>> the whole bio to your code with new INLINE_ENCRYPTION bit set.
>> when the optional argument "perform_inline_encrypt" is set, we are not unconditionally sending the bio
>> to the block devices. The steps are explained below:
>> 1. user invokes the dm-setup command with the registered cipher "xts" and with the optional argument
>> "perform_inline_encrypt".
>> 2. dm-setup invokes the setkey function of the newly introduced algorithm, which finds the available key slots
>> to be programmed(UFS Host controller Inline Encryption engine has multiple keyslots), program the key slot,
>> and return the key slot index as return value of the set key function.
>> 3. When read/write operation happens, crypt_map() function in dm-crypt validates whether there is associated
>> key configuration index for the request. The Bio will be submitted directly in this case only with the associated
>> crypto context.
>> 4. Block device driver, eg. UFS host controller driver will create the Transfer requests as per this crypto context and
>> encryption happens inside the controller.
>>
>>> What happens, if the cipher in table is different from what your hw is doing?
>> In this case, the dm-crypt will work as previous. This is because the setkey returns 0.
>> whenever there is key configuration index associated, setkey returns index value(greater than 0). The bios are submitted
>> with that information to underlying block device drivers.
>> Also, care is taken to ensure that fallback will happen in case hardware lacks the support of any key lengths.
>>
>> Appreciate your suggestions/feedback. We are trying to bring modifications into the subsystem to support controllers with
>> inline encryption capabilities and tried our best to take care of any vulnerabilities or risks associated to same.
>> Inline encryption engines got huge advantage over the accelerators/software algorithms that it removes overhead associated
>> to current implementation like performing transformation on 512 byte chunks, allocation of scatterlists etc.
> Thanks,
>
> Eric
>

Best Regards,

Ladvine

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] md: dm-crypt: Add Inline Encryption support for dmcrypt
  2018-06-01  8:16     ` Christoph Hellwig
@ 2018-06-06 10:04       ` Ladvine D Almeida
  2018-06-06 13:15         ` Christoph Hellwig
  0 siblings, 1 reply; 9+ messages in thread
From: Ladvine D Almeida @ 2018-06-06 10:04 UTC (permalink / raw)
  To: Christoph Hellwig, Ladvine D Almeida
  Cc: Milan Broz, Alasdair Kergon, Mike Snitzer, linux-kernel,
	Manjunath M Bettegowda, Prabu Thangamuthu, Tejas Joglekar,
	device-mapper development, Joao Pinto, tytso, jaegeuk,
	linux-crypto, linux-block

On Friday 01 June 2018 09:16 AM, Christoph Hellwig wrote:
> On Wed, May 30, 2018 at 02:52:07PM +0000, Ladvine D Almeida wrote:
>> when the optional argument "perform_inline_encrypt" is set, we are not unconditionally sending the bio
>> to the block devices. The steps are explained below:
>> 1. user invokes the dm-setup command with the registered cipher "xts" and with the optional argument
>> "perform_inline_encrypt".
>> 2. dm-setup invokes the setkey function of the newly introduced algorithm, which finds the available key slots
>> to be programmed(UFS Host controller Inline Encryption engine has multiple keyslots), program the key slot,
>> and return the key slot index as return value of the set key function.
>> 3. When read/write operation happens, crypt_map() function in dm-crypt validates whether there is associated
>> key configuration index for the request. The Bio will be submitted directly in this case only with the associated
>> crypto context.
>> 4. Block device driver, eg. UFS host controller driver will create the Transfer requests as per this crypto context and
>> encryption happens inside the controller.
> Why isn't this all controlled by the ufs drivers, using helpers as
> required?

The idea is to make use of the existing utilities like dmsetup to configure the keys, mapping etc.

>
> Also why do we even need this API over just implementing TCG
> Opal/Opalite on the device?
>
TCG Opal/Opalite is FDE solution. right?

File Based Encryption is accomplished in the ext4/f2fs layer by invoking the registered algorithms from LKCF.

There is a scope for FBE, if the implementation is crypto API.


Regards,

Ladvine


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH] md: dm-crypt: Add Inline Encryption support for dmcrypt
  2018-06-06 10:04       ` Ladvine D Almeida
@ 2018-06-06 13:15         ` Christoph Hellwig
  0 siblings, 0 replies; 9+ messages in thread
From: Christoph Hellwig @ 2018-06-06 13:15 UTC (permalink / raw)
  To: Ladvine D Almeida
  Cc: Christoph Hellwig, Milan Broz, Alasdair Kergon, Mike Snitzer,
	linux-kernel, Manjunath M Bettegowda, Prabu Thangamuthu,
	Tejas Joglekar, device-mapper development, Joao Pinto, tytso,
	jaegeuk, linux-crypto, linux-block

On Wed, Jun 06, 2018 at 10:04:14AM +0000, Ladvine D Almeida wrote:
> The idea is to make use of the existing utilities like dmsetup to configure the keys, mapping etc.

So fix the utilities to support your encryption instead of implementing
kernel-level shim code.

> TCG Opal/Opalite is FDE solution. right?

Doesn't have to the full disk, although that is the typical use case.

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2018-06-06 13:15 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-05-28 13:01 [PATCH] md: dm-crypt: Add Inline Encryption support for dmcrypt Ladvine D Almeida
2018-05-28 16:32 ` Milan Broz
2018-05-30 14:52   ` Ladvine D Almeida
2018-06-01  7:52     ` Milan Broz
2018-06-01  8:16     ` Christoph Hellwig
2018-06-06 10:04       ` Ladvine D Almeida
2018-06-06 13:15         ` Christoph Hellwig
2018-06-01 14:46     ` [dm-devel] " Eric Biggers
2018-06-04 13:15       ` Ladvine D Almeida

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).