All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH net-next] net/tls: Calculate nsg for zerocopy path without skb_cow_data.
@ 2018-08-03  0:30 Doron Roberts-Kedes
  2018-08-03  1:23 ` Vakul Garg
  0 siblings, 1 reply; 5+ messages in thread
From: Doron Roberts-Kedes @ 2018-08-03  0:30 UTC (permalink / raw)
  To: David S . Miller
  Cc: Dave Watson, Vakul Garg, Boris Pismenny, Aviad Yehezkel, netdev,
	Doron Roberts-Kedes

decrypt_skb fails if the number of sg elements required to map is
greater than MAX_SKB_FRAGS. As noted by Vakul Garg, nsg must always be
calculated, but skb_cow_data adds unnecessary memcpy's for the zerocopy
case.

The new function skb_nsg calculates the number of scatterlist elements
required to map the skb without the extra overhead of skb_cow_data. This
function mimics the structure of skb_to_sgvec.

Fixes: c46234ebb4d1 ("tls: RX path for ktls")
Signed-off-by: Doron Roberts-Kedes <doronrk@fb.com>
---
 net/tls/tls_sw.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 86 insertions(+), 3 deletions(-)

diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index ff3a6904a722..c62793601cfc 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -43,6 +43,76 @@
 
 #define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE
 
+static int __skb_nsg(struct sk_buff *skb, int offset, int len,
+		   unsigned int recursion_level)
+{
+        int start = skb_headlen(skb);
+        int i, copy = start - offset;
+        struct sk_buff *frag_iter;
+        int elt = 0;
+
+        if (unlikely(recursion_level >= 24))
+                return -EMSGSIZE;
+
+        if (copy > 0) {
+                if (copy > len)
+                        copy = len;
+                elt++;
+                if ((len -= copy) == 0)
+                        return elt;
+                offset += copy;
+        }
+
+        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                int end;
+
+                WARN_ON(start > offset + len);
+
+                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+                if ((copy = end - offset) > 0) {
+                        if (copy > len)
+                                copy = len;
+                        elt++;
+                        if (!(len -= copy))
+                                return elt;
+                        offset += copy;
+                }
+                start = end;
+        }
+
+        skb_walk_frags(skb, frag_iter) {
+                int end, ret;
+
+                WARN_ON(start > offset + len);
+
+                end = start + frag_iter->len;
+                if ((copy = end - offset) > 0) {
+
+                        if (copy > len)
+                                copy = len;
+                        ret = __skb_nsg(frag_iter, offset - start, copy,
+					recursion_level + 1);
+                        if (unlikely(ret < 0))
+                                return ret;
+                        elt += ret;
+                        if ((len -= copy) == 0)
+                                return elt;
+                        offset += copy;
+                }
+                start = end;
+        }
+        BUG_ON(len);
+        return elt;
+}
+
+/* Return the number of scatterlist elements required to completely map the
+ * skb, or -EMSGSIZE if the recursion depth is exceeded.
+ */
+static int skb_nsg(struct sk_buff *skb, int offset, int len)
+{
+	return __skb_nsg(skb, offset, len, 0);
+}
+
 static int tls_do_decryption(struct sock *sk,
 			     struct scatterlist *sgin,
 			     struct scatterlist *sgout,
@@ -693,7 +763,7 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
 	struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
 	struct scatterlist *sgin = &sgin_arr[0];
 	struct strp_msg *rxm = strp_msg(skb);
-	int ret, nsg = ARRAY_SIZE(sgin_arr);
+	int ret, nsg;
 	struct sk_buff *unused;
 
 	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
@@ -704,10 +774,23 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
 
 	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 	if (!sgout) {
-		nsg = skb_cow_data(skb, 0, &unused) + 1;
+		nsg = skb_cow_data(skb, 0, &unused);
+	} else {
+		nsg = skb_nsg(skb,
+			      rxm->offset + tls_ctx->rx.prepend_size,
+			      rxm->full_len - tls_ctx->rx.prepend_size);
+		if (nsg <= 0)
+			return nsg;
+	}
+
+	// We need one extra for ctx->rx_aad_ciphertext
+	nsg++;
+
+	if (nsg > ARRAY_SIZE(sgin_arr))
 		sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
+
+	if (!sgout)
 		sgout = sgin;
-	}
 
 	sg_init_table(sgin, nsg);
 	sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);
-- 
2.17.1

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* RE: [PATCH net-next] net/tls: Calculate nsg for zerocopy path without skb_cow_data.
  2018-08-03  0:30 [PATCH net-next] net/tls: Calculate nsg for zerocopy path without skb_cow_data Doron Roberts-Kedes
@ 2018-08-03  1:23 ` Vakul Garg
  2018-08-03 18:49   ` Doron Roberts-Kedes
  0 siblings, 1 reply; 5+ messages in thread
From: Vakul Garg @ 2018-08-03  1:23 UTC (permalink / raw)
  To: Doron Roberts-Kedes, David S . Miller
  Cc: Dave Watson, Boris Pismenny, Aviad Yehezkel, netdev



> -----Original Message-----
> From: Doron Roberts-Kedes [mailto:doronrk@fb.com]
> Sent: Friday, August 3, 2018 6:00 AM
> To: David S . Miller <davem@davemloft.net>
> Cc: Dave Watson <davejwatson@fb.com>; Vakul Garg
> <vakul.garg@nxp.com>; Boris Pismenny <borisp@mellanox.com>; Aviad
> Yehezkel <aviadye@mellanox.com>; netdev@vger.kernel.org; Doron
> Roberts-Kedes <doronrk@fb.com>
> Subject: [PATCH net-next] net/tls: Calculate nsg for zerocopy path without
> skb_cow_data.
> 
> decrypt_skb fails if the number of sg elements required to map is greater
> than MAX_SKB_FRAGS. As noted by Vakul Garg, nsg must always be
> calculated, but skb_cow_data adds unnecessary memcpy's for the zerocopy
> case.
> 
> The new function skb_nsg calculates the number of scatterlist elements
> required to map the skb without the extra overhead of skb_cow_data. This
> function mimics the structure of skb_to_sgvec.
> 
> Fixes: c46234ebb4d1 ("tls: RX path for ktls")
> Signed-off-by: Doron Roberts-Kedes <doronrk@fb.com>
> ---
>  net/tls/tls_sw.c | 89
> ++++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 86 insertions(+), 3 deletions(-)
> 
> diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index
> ff3a6904a722..c62793601cfc 100644
> --- a/net/tls/tls_sw.c
> +++ b/net/tls/tls_sw.c
> @@ -43,6 +43,76 @@
> 
>  #define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE
> 
> +static int __skb_nsg(struct sk_buff *skb, int offset, int len,
> +		   unsigned int recursion_level)
> +{
> +        int start = skb_headlen(skb);
> +        int i, copy = start - offset;
> +        struct sk_buff *frag_iter;
> +        int elt = 0;
> +
> +        if (unlikely(recursion_level >= 24))
> +                return -EMSGSIZE;
> +
> +        if (copy > 0) {
> +                if (copy > len)
> +                        copy = len;
> +                elt++;
> +                if ((len -= copy) == 0)
> +                        return elt;
> +                offset += copy;
> +        }
> +
> +        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
> +                int end;
> +
> +                WARN_ON(start > offset + len);
> +
> +                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
> +                if ((copy = end - offset) > 0) {
> +                        if (copy > len)
> +                                copy = len;
> +                        elt++;
> +                        if (!(len -= copy))
> +                                return elt;
> +                        offset += copy;
> +                }
> +                start = end;
> +        }
> +
> +        skb_walk_frags(skb, frag_iter) {
> +                int end, ret;
> +
> +                WARN_ON(start > offset + len);
> +
> +                end = start + frag_iter->len;
> +                if ((copy = end - offset) > 0) {
> +
> +                        if (copy > len)
> +                                copy = len;
> +                        ret = __skb_nsg(frag_iter, offset - start, copy,
> +					recursion_level + 1);
> +                        if (unlikely(ret < 0))
> +                                return ret;
> +                        elt += ret;
> +                        if ((len -= copy) == 0)
> +                                return elt;
> +                        offset += copy;
> +                }
> +                start = end;
> +        }
> +        BUG_ON(len);
> +        return elt;
> +}
> +
> +/* Return the number of scatterlist elements required to completely map
> +the
> + * skb, or -EMSGSIZE if the recursion depth is exceeded.
> + */
> +static int skb_nsg(struct sk_buff *skb, int offset, int len) {
> +	return __skb_nsg(skb, offset, len, 0); }
> +

These are generic functions and are useful elsewhere too.
Should the above two functions be exported by skbuff.c?

>  static int tls_do_decryption(struct sock *sk,
>  			     struct scatterlist *sgin,
>  			     struct scatterlist *sgout,
> @@ -693,7 +763,7 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
>  	struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
>  	struct scatterlist *sgin = &sgin_arr[0];
>  	struct strp_msg *rxm = strp_msg(skb);
> -	int ret, nsg = ARRAY_SIZE(sgin_arr);
> +	int ret, nsg;
>  	struct sk_buff *unused;
> 
>  	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, @@ -
> 704,10 +774,23 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
> 
>  	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
>  	if (!sgout) {
> -		nsg = skb_cow_data(skb, 0, &unused) + 1;
> +		nsg = skb_cow_data(skb, 0, &unused);
> +	} else {
> +		nsg = skb_nsg(skb,
> +			      rxm->offset + tls_ctx->rx.prepend_size,
> +			      rxm->full_len - tls_ctx->rx.prepend_size);
> +		if (nsg <= 0)
> +			return nsg;
Comparison should be (nsg < 1). TLS forbids '0' sized records.

> +	}
> +
> +	// We need one extra for ctx->rx_aad_ciphertext
> +	nsg++;
> +
> +	if (nsg > ARRAY_SIZE(sgin_arr))
>  		sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);

Add check for kmalloc_array returning NULL.

> +
> +	if (!sgout)
>  		sgout = sgin;
> -	}
> 
>  	sg_init_table(sgin, nsg);
>  	sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);
> --
> 2.17.1

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH net-next] net/tls: Calculate nsg for zerocopy path without skb_cow_data.
  2018-08-03  1:23 ` Vakul Garg
@ 2018-08-03 18:49   ` Doron Roberts-Kedes
  2018-08-06 18:32     ` Doron Roberts-Kedes
  0 siblings, 1 reply; 5+ messages in thread
From: Doron Roberts-Kedes @ 2018-08-03 18:49 UTC (permalink / raw)
  To: Vakul Garg
  Cc: David S . Miller, Dave Watson, Boris Pismenny, Aviad Yehezkel, netdev

On Fri, Aug 03, 2018 at 01:23:33AM +0000, Vakul Garg wrote:
> 
> 
> > -----Original Message-----
> > From: Doron Roberts-Kedes [mailto:doronrk@fb.com]
> > Sent: Friday, August 3, 2018 6:00 AM
> > To: David S . Miller <davem@davemloft.net>
> > Cc: Dave Watson <davejwatson@fb.com>; Vakul Garg
> > <vakul.garg@nxp.com>; Boris Pismenny <borisp@mellanox.com>; Aviad
> > Yehezkel <aviadye@mellanox.com>; netdev@vger.kernel.org; Doron
> > Roberts-Kedes <doronrk@fb.com>
> > Subject: [PATCH net-next] net/tls: Calculate nsg for zerocopy path without
> > skb_cow_data.
> > 
> > decrypt_skb fails if the number of sg elements required to map is greater
> > than MAX_SKB_FRAGS. As noted by Vakul Garg, nsg must always be
> > calculated, but skb_cow_data adds unnecessary memcpy's for the zerocopy
> > case.
> > 
> > The new function skb_nsg calculates the number of scatterlist elements
> > required to map the skb without the extra overhead of skb_cow_data. This
> > function mimics the structure of skb_to_sgvec.
> > 
> > Fixes: c46234ebb4d1 ("tls: RX path for ktls")
> > Signed-off-by: Doron Roberts-Kedes <doronrk@fb.com>
> > ---
> >  net/tls/tls_sw.c | 89
> > ++++++++++++++++++++++++++++++++++++++++++++++--
> >  1 file changed, 86 insertions(+), 3 deletions(-)
> > 
> > diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index
> > ff3a6904a722..c62793601cfc 100644
> > --- a/net/tls/tls_sw.c
> > +++ b/net/tls/tls_sw.c
> > @@ -43,6 +43,76 @@
> > 
> >  #define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE
> > 
> > +static int __skb_nsg(struct sk_buff *skb, int offset, int len,
> > +		   unsigned int recursion_level)
> > +{
> > +        int start = skb_headlen(skb);
> > +        int i, copy = start - offset;
> > +        struct sk_buff *frag_iter;
> > +        int elt = 0;
> > +
> > +        if (unlikely(recursion_level >= 24))
> > +                return -EMSGSIZE;
> > +
> > +        if (copy > 0) {
> > +                if (copy > len)
> > +                        copy = len;
> > +                elt++;
> > +                if ((len -= copy) == 0)
> > +                        return elt;
> > +                offset += copy;
> > +        }
> > +
> > +        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
> > +                int end;
> > +
> > +                WARN_ON(start > offset + len);
> > +
> > +                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
> > +                if ((copy = end - offset) > 0) {
> > +                        if (copy > len)
> > +                                copy = len;
> > +                        elt++;
> > +                        if (!(len -= copy))
> > +                                return elt;
> > +                        offset += copy;
> > +                }
> > +                start = end;
> > +        }
> > +
> > +        skb_walk_frags(skb, frag_iter) {
> > +                int end, ret;
> > +
> > +                WARN_ON(start > offset + len);
> > +
> > +                end = start + frag_iter->len;
> > +                if ((copy = end - offset) > 0) {
> > +
> > +                        if (copy > len)
> > +                                copy = len;
> > +                        ret = __skb_nsg(frag_iter, offset - start, copy,
> > +					recursion_level + 1);
> > +                        if (unlikely(ret < 0))
> > +                                return ret;
> > +                        elt += ret;
> > +                        if ((len -= copy) == 0)
> > +                                return elt;
> > +                        offset += copy;
> > +                }
> > +                start = end;
> > +        }
> > +        BUG_ON(len);
> > +        return elt;
> > +}
> > +
> > +/* Return the number of scatterlist elements required to completely map
> > +the
> > + * skb, or -EMSGSIZE if the recursion depth is exceeded.
> > + */
> > +static int skb_nsg(struct sk_buff *skb, int offset, int len) {
> > +	return __skb_nsg(skb, offset, len, 0); }
> > +
> 
> These are generic functions and are useful elsewhere too.
> Should the above two functions be exported by skbuff.c?

True. Perhaps it can move into skbuff.c if/when there is a second
use case for it.

> 
> >  static int tls_do_decryption(struct sock *sk,
> >  			     struct scatterlist *sgin,
> >  			     struct scatterlist *sgout,
> > @@ -693,7 +763,7 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
> >  	struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
> >  	struct scatterlist *sgin = &sgin_arr[0];
> >  	struct strp_msg *rxm = strp_msg(skb);
> > -	int ret, nsg = ARRAY_SIZE(sgin_arr);
> > +	int ret, nsg;
> >  	struct sk_buff *unused;
> > 
> >  	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, @@ -
> > 704,10 +774,23 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
> > 
> >  	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
> >  	if (!sgout) {
> > -		nsg = skb_cow_data(skb, 0, &unused) + 1;
> > +		nsg = skb_cow_data(skb, 0, &unused);
> > +	} else {
> > +		nsg = skb_nsg(skb,
> > +			      rxm->offset + tls_ctx->rx.prepend_size,
> > +			      rxm->full_len - tls_ctx->rx.prepend_size);
> > +		if (nsg <= 0)
> > +			return nsg;
> Comparison should be (nsg < 1). TLS forbids '0' sized records.

Yes true, v2 incoming

> 
> > +	}
> > +
> > +	// We need one extra for ctx->rx_aad_ciphertext
> > +	nsg++;
> > +
> > +	if (nsg > ARRAY_SIZE(sgin_arr))
> >  		sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
> 
> Add check for kmalloc_array returning NULL.

Yes true, v2 incoming.

> 
> > +
> > +	if (!sgout)
> >  		sgout = sgin;
> > -	}
> > 
> >  	sg_init_table(sgin, nsg);
> >  	sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);
> > --
> > 2.17.1
> 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH net-next] net/tls: Calculate nsg for zerocopy path without skb_cow_data.
  2018-08-03 18:49   ` Doron Roberts-Kedes
@ 2018-08-06 18:32     ` Doron Roberts-Kedes
  2018-08-07  4:41       ` Vakul Garg
  0 siblings, 1 reply; 5+ messages in thread
From: Doron Roberts-Kedes @ 2018-08-06 18:32 UTC (permalink / raw)
  To: Vakul Garg
  Cc: David S . Miller, Dave Watson, Boris Pismenny, Aviad Yehezkel, netdev

On Fri, Aug 03, 2018 at 11:49:02AM -0700, Doron Roberts-Kedes wrote:
> On Fri, Aug 03, 2018 at 01:23:33AM +0000, Vakul Garg wrote:
> > 
> > 
> > > -----Original Message-----
> > > From: Doron Roberts-Kedes [mailto:doronrk@fb.com]
> > > Sent: Friday, August 3, 2018 6:00 AM
> > > To: David S . Miller <davem@davemloft.net>
> > > Cc: Dave Watson <davejwatson@fb.com>; Vakul Garg
> > > <vakul.garg@nxp.com>; Boris Pismenny <borisp@mellanox.com>; Aviad
> > > Yehezkel <aviadye@mellanox.com>; netdev@vger.kernel.org; Doron
> > > Roberts-Kedes <doronrk@fb.com>
> > > Subject: [PATCH net-next] net/tls: Calculate nsg for zerocopy path without
> > > skb_cow_data.
> > > 
> > > decrypt_skb fails if the number of sg elements required to map is greater
> > > than MAX_SKB_FRAGS. As noted by Vakul Garg, nsg must always be
> > > calculated, but skb_cow_data adds unnecessary memcpy's for the zerocopy
> > > case.
> > > 
> > > The new function skb_nsg calculates the number of scatterlist elements
> > > required to map the skb without the extra overhead of skb_cow_data. This
> > > function mimics the structure of skb_to_sgvec.
> > > 
> > > Fixes: c46234ebb4d1 ("tls: RX path for ktls")
> > > Signed-off-by: Doron Roberts-Kedes <doronrk@fb.com>
> > > ---
> > >  net/tls/tls_sw.c | 89
> > > ++++++++++++++++++++++++++++++++++++++++++++++--
> > >  1 file changed, 86 insertions(+), 3 deletions(-)
> > > 
> > >  	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
> > >  	if (!sgout) {
> > > -		nsg = skb_cow_data(skb, 0, &unused) + 1;
> > > +		nsg = skb_cow_data(skb, 0, &unused);
> > > +	} else {
> > > +		nsg = skb_nsg(skb,
> > > +			      rxm->offset + tls_ctx->rx.prepend_size,
> > > +			      rxm->full_len - tls_ctx->rx.prepend_size);
> > > +		if (nsg <= 0)
> > > +			return nsg;
> > Comparison should be (nsg < 1). TLS forbids '0' sized records.
> 
> Yes true, v2 incoming
>

Glancing at this a second time, I actually don't believe this should be
changed. nsg <= 0 is equivalent to nsg < 1. Returning 0 if the record is
0 sized is the proper behavior here, since decrypting a zero-length skb
is a no-op. It is true that zero sized TLS records are forbidden, but it
is confusing to enforce that in this part of the code. I would be
surprised to learn that tls_sw_recvmsg could be invoked with a len equal
to 0, but if I'm wrong, and that case does need to be handled, then it
should be in a different patch. 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* RE: [PATCH net-next] net/tls: Calculate nsg for zerocopy path without skb_cow_data.
  2018-08-06 18:32     ` Doron Roberts-Kedes
@ 2018-08-07  4:41       ` Vakul Garg
  0 siblings, 0 replies; 5+ messages in thread
From: Vakul Garg @ 2018-08-07  4:41 UTC (permalink / raw)
  To: Doron Roberts-Kedes
  Cc: David S . Miller, Dave Watson, Boris Pismenny, Aviad Yehezkel, netdev



> -----Original Message-----
> From: Doron Roberts-Kedes [mailto:doronrk@fb.com]
> Sent: Tuesday, August 7, 2018 12:02 AM
> To: Vakul Garg <vakul.garg@nxp.com>
> Cc: David S . Miller <davem@davemloft.net>; Dave Watson
> <davejwatson@fb.com>; Boris Pismenny <borisp@mellanox.com>; Aviad
> Yehezkel <aviadye@mellanox.com>; netdev@vger.kernel.org
> Subject: Re: [PATCH net-next] net/tls: Calculate nsg for zerocopy path
> without skb_cow_data.
> 
> On Fri, Aug 03, 2018 at 11:49:02AM -0700, Doron Roberts-Kedes wrote:
> > On Fri, Aug 03, 2018 at 01:23:33AM +0000, Vakul Garg wrote:
> > >
> > >
> > > > -----Original Message-----
> > > > From: Doron Roberts-Kedes [mailto:doronrk@fb.com]
> > > > Sent: Friday, August 3, 2018 6:00 AM
> > > > To: David S . Miller <davem@davemloft.net>
> > > > Cc: Dave Watson <davejwatson@fb.com>; Vakul Garg
> > > > <vakul.garg@nxp.com>; Boris Pismenny <borisp@mellanox.com>;
> Aviad
> > > > Yehezkel <aviadye@mellanox.com>; netdev@vger.kernel.org; Doron
> > > > Roberts-Kedes <doronrk@fb.com>
> > > > Subject: [PATCH net-next] net/tls: Calculate nsg for zerocopy path
> > > > without skb_cow_data.
> > > >
> > > > decrypt_skb fails if the number of sg elements required to map is
> > > > greater than MAX_SKB_FRAGS. As noted by Vakul Garg, nsg must
> > > > always be calculated, but skb_cow_data adds unnecessary memcpy's
> > > > for the zerocopy case.
> > > >
> > > > The new function skb_nsg calculates the number of scatterlist
> > > > elements required to map the skb without the extra overhead of
> > > > skb_cow_data. This function mimics the structure of skb_to_sgvec.
> > > >
> > > > Fixes: c46234ebb4d1 ("tls: RX path for ktls")
> > > > Signed-off-by: Doron Roberts-Kedes <doronrk@fb.com>
> > > > ---
> > > >  net/tls/tls_sw.c | 89
> > > > ++++++++++++++++++++++++++++++++++++++++++++++--
> > > >  1 file changed, 86 insertions(+), 3 deletions(-)
> > > >
> > > >  	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
> > > >  	if (!sgout) {
> > > > -		nsg = skb_cow_data(skb, 0, &unused) + 1;
> > > > +		nsg = skb_cow_data(skb, 0, &unused);
> > > > +	} else {
> > > > +		nsg = skb_nsg(skb,
> > > > +			      rxm->offset + tls_ctx->rx.prepend_size,
> > > > +			      rxm->full_len - tls_ctx->rx.prepend_size);
> > > > +		if (nsg <= 0)
> > > > +			return nsg;
> > > Comparison should be (nsg < 1). TLS forbids '0' sized records.
> >
> > Yes true, v2 incoming
> >
> 
> Glancing at this a second time, I actually don't believe this should be
> changed. nsg <= 0 is equivalent to nsg < 1. 

Correct.


> Returning 0 if the record is
> 0 sized is the proper behavior here, since decrypting a zero-length skb is a no-
> op. It is true that zero sized TLS records are forbidden, but it is confusing to
> enforce that in this part of the code. I would be surpised to learn that
> tls_sw_recvmsg could be invoked with a len equal to 0, but if I'm wrong, and
> that case does need to be handled, then it should be in a different patch.

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2018-08-07  6:55 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-08-03  0:30 [PATCH net-next] net/tls: Calculate nsg for zerocopy path without skb_cow_data Doron Roberts-Kedes
2018-08-03  1:23 ` Vakul Garg
2018-08-03 18:49   ` Doron Roberts-Kedes
2018-08-06 18:32     ` Doron Roberts-Kedes
2018-08-07  4:41       ` Vakul Garg

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.