netdev.vger.kernel.org archive mirror
* [PATCH net-next v2 1/2] net: mhi-net: Add de-aggregation support
@ 2021-02-02 16:16 Loic Poulain
  2021-02-02 16:16 ` [PATCH net-next v2 2/2] net: qualcomm: rmnet: Fix rx_handler for non-linear skbs Loic Poulain
  2021-02-02 22:45 ` [PATCH net-next v2 1/2] net: mhi-net: Add de-aggregation support Willem de Bruijn
  0 siblings, 2 replies; 7+ messages in thread
From: Loic Poulain @ 2021-02-02 16:16 UTC
  To: kuba, davem
  Cc: willemdebruijn.kernel, netdev, stranche, subashab, Loic Poulain

When the device-side MTU is larger than the host-side MTU, packets
(typically rmnet packets) are split over multiple MHI transfers.
In that case, the fragments must be re-aggregated to recover the
packet before forwarding it to the upper layer.

A fragmented packet results in an -EOVERFLOW MHI transaction status
for each of its fragments, except the final one. Such transfers were
previously considered errors and the fragments were simply dropped.

This change adds a re-aggregation mechanism using skb chaining, via
the skb frag_list.

A warning is printed (once) since this behavior usually comes from a
misconfiguration of the device (e.g. the modem MTU).

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
---
 v2: use zero-copy skb chaining instead of skb_copy_expand.

 drivers/net/mhi_net.c | 79 ++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 69 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index 4f512531..be39779 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -32,6 +32,8 @@ struct mhi_net_stats {
 struct mhi_net_dev {
 	struct mhi_device *mdev;
 	struct net_device *ndev;
+	struct sk_buff *skbagg_head;
+	struct sk_buff *skbagg_tail;
 	struct delayed_work rx_refill;
 	struct mhi_net_stats stats;
 	u32 rx_queue_sz;
@@ -132,6 +134,37 @@ static void mhi_net_setup(struct net_device *ndev)
 	ndev->tx_queue_len = 1000;
 }
 
+static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
+				       struct sk_buff *skb)
+{
+	struct sk_buff *head = mhi_netdev->skbagg_head;
+	struct sk_buff *tail = mhi_netdev->skbagg_tail;
+
+	/* This is non-paged skb chaining using frag_list */
+
+	if (!head) {
+		mhi_netdev->skbagg_head = skb;
+		return skb;
+	}
+
+	if (!skb_shinfo(head)->frag_list)
+		skb_shinfo(head)->frag_list = skb;
+	else
+		tail->next = skb;
+
+	/* data_len is normally the size of paged data, in our case there is no
+	 * paged data (nr_frags = 0), so it represents the size of chained skbs,
+	 * This way, net core will consider skb->frag_list.
+	 */
+	head->len += skb->len;
+	head->data_len += skb->len;
+	head->truesize += skb->truesize;
+
+	mhi_netdev->skbagg_tail = skb;
+
+	return mhi_netdev->skbagg_head;
+}
+
 static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 				struct mhi_result *mhi_res)
 {
@@ -142,19 +175,42 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
 
 	if (unlikely(mhi_res->transaction_status)) {
-		dev_kfree_skb_any(skb);
-
-		/* MHI layer stopping/resetting the DL channel */
-		if (mhi_res->transaction_status == -ENOTCONN)
+		switch (mhi_res->transaction_status) {
+		case -EOVERFLOW:
+			/* Packet can not fit in one MHI buffer and has been
+			 * split over multiple MHI transfers, do re-aggregation.
+			 * That usually means the device side MTU is larger than
+			 * the host side MTU/MRU. Since this is not optimal,
+			 * print a warning (once).
+			 */
+			netdev_warn_once(mhi_netdev->ndev,
+					 "Fragmented packets received, fix MTU?\n");
+			skb_put(skb, mhi_res->bytes_xferd);
+			mhi_net_skb_agg(mhi_netdev, skb);
+			break;
+		case -ENOTCONN:
+			/* MHI layer stopping/resetting the DL channel */
+			dev_kfree_skb_any(skb);
 			return;
-
-		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
-		u64_stats_inc(&mhi_netdev->stats.rx_errors);
-		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+		default:
+			/* Unknown error, simply drop */
+			dev_kfree_skb_any(skb);
+			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+			u64_stats_inc(&mhi_netdev->stats.rx_errors);
+			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+		}
 	} else {
+		skb_put(skb, mhi_res->bytes_xferd);
+
+		if (mhi_netdev->skbagg_head) {
+			/* Aggregate the final fragment */
+			skb = mhi_net_skb_agg(mhi_netdev, skb);
+			mhi_netdev->skbagg_head = NULL;
+		}
+
 		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
 		u64_stats_inc(&mhi_netdev->stats.rx_packets);
-		u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
+		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
 		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
 
 		switch (skb->data[0] & 0xf0) {
@@ -169,7 +225,6 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 			break;
 		}
 
-		skb_put(skb, mhi_res->bytes_xferd);
 		netif_rx(skb);
 	}
 
@@ -267,6 +322,7 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
 	dev_set_drvdata(dev, mhi_netdev);
 	mhi_netdev->ndev = ndev;
 	mhi_netdev->mdev = mhi_dev;
+	mhi_netdev->skbagg_head = NULL;
 	SET_NETDEV_DEV(ndev, &mhi_dev->dev);
 	SET_NETDEV_DEVTYPE(ndev, &wwan_type);
 
@@ -301,6 +357,9 @@ static void mhi_net_remove(struct mhi_device *mhi_dev)
 
 	mhi_unprepare_from_transfer(mhi_netdev->mdev);
 
+	if (mhi_netdev->skbagg_head)
+		kfree_skb(mhi_netdev->skbagg_head);
+
 	free_netdev(mhi_netdev->ndev);
 }
 
-- 
2.7.4
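
For illustration, a minimal user-space sketch of the chaining scheme
used above, with a hypothetical struct frag standing in for struct
sk_buff and hypothetical helper names; an analog of the technique, not
the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct frag {
	size_t len;		/* payload length of this fragment */
	struct frag *next;	/* next fragment in the chain */
};

struct agg {
	struct frag *head;	/* first fragment, carries the running totals */
	struct frag *tail;	/* last fragment, for O(1) append */
	size_t total_len;	/* analog of head->len after chaining */
};

/* Analog of mhi_net_skb_agg(): append a fragment, keep totals on the head */
static struct frag *agg_add(struct agg *a, struct frag *f)
{
	if (!a->head) {			/* first fragment becomes the head */
		a->head = a->tail = f;
		a->total_len = f->len;
		return f;
	}
	a->tail->next = f;		/* like tail->next = skb */
	a->tail = f;
	a->total_len += f->len;		/* like head->len += skb->len */
	return a->head;
}

int main(void)
{
	/* e.g. a 3742-byte packet received over 1500-byte MHI buffers */
	size_t sizes[] = { 1500, 1500, 742 };
	struct agg a = { 0 };
	struct frag *f, *next;
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		f = calloc(1, sizeof(*f));
		if (!f)
			return 1;
		f->len = sizes[i];
		agg_add(&a, f);
	}
	printf("reassembled length: %zu\n", a.total_len);	/* 3742 */

	for (f = a.head; f; f = next) {
		next = f->next;
		free(f);
	}
	return 0;
}

The real code keeps the first fragment in the skb's linear area and
hangs the rest off skb_shinfo(head)->frag_list; the sketch flattens
that distinction into a single list, which is enough to show the O(1)
tail append and the length accounting that the rx_bytes change relies on.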



* [PATCH net-next v2 2/2] net: qualcomm: rmnet: Fix rx_handler for non-linear skbs
  2021-02-02 16:16 [PATCH net-next v2 1/2] net: mhi-net: Add de-aggregation support Loic Poulain
@ 2021-02-02 16:16 ` Loic Poulain
  2021-02-02 22:46   ` Willem de Bruijn
  2021-02-03  0:57   ` subashab
  2021-02-02 22:45 ` [PATCH net-next v2 1/2] net: mhi-net: Add de-aggregation support Willem de Bruijn
  1 sibling, 2 replies; 7+ messages in thread
From: Loic Poulain @ 2021-02-02 16:16 UTC
  To: kuba, davem
  Cc: willemdebruijn.kernel, netdev, stranche, subashab, Loic Poulain

There is no guarantee that the rmnet rx_handler is only fed linear
skbs, but the current rmnet implementation does not check that,
leading to crashes when non-linear skbs are processed as linear ones.

Fix that by ensuring skb linearization before processing.

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
---
 v2: Add this patch to the series to prevent crash

 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 3d7d3ab..2776c32 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -180,7 +180,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 	struct rmnet_port *port;
 	struct net_device *dev;
 
-	if (!skb)
+	if (!skb || skb_linearize(skb))
 		goto done;
 
 	if (skb->pkt_type == PACKET_LOOPBACK)
-- 
2.7.4
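
To illustrate why the linearize check matters: a handler that reads
multi-byte fields straight out of the head buffer of a non-linear skb
runs past the end of that buffer. Below is a user-space sketch
(hypothetical names, an analog of the problem rather than the rmnet
code): it flattens scattered chunks before parsing, which is what
gating the handler on skb_linearize() guarantees.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct chunk {
	const uint8_t *data;
	size_t len;
};

/* Analog of skb_linearize(): copy scattered chunks into one flat buffer */
static uint8_t *linearize(const struct chunk *c, size_t n, size_t *out_len)
{
	size_t total = 0, off = 0, i;
	uint8_t *flat;

	for (i = 0; i < n; i++)
		total += c[i].len;
	flat = malloc(total);
	if (!flat)
		return NULL;
	for (i = 0; i < n; i++) {
		memcpy(flat + off, c[i].data, c[i].len);
		off += c[i].len;
	}
	*out_len = total;
	return flat;
}

int main(void)
{
	/* A made-up 4-byte header (IPv4-like layout) split across two
	 * chunks, as a fragmented packet might be.
	 */
	const uint8_t part1[] = { 0x45, 0x00 };
	const uint8_t part2[] = { 0x0e, 0x9e };
	struct chunk c[] = { { part1, 2 }, { part2, 2 } };
	size_t len;
	uint8_t *flat;

	/* Reading 4 header bytes from part1 alone would overrun it;
	 * flatten first, then parse.
	 */
	flat = linearize(c, 2, &len);
	if (!flat)
		return 1;
	printf("version: %u, total length: %u\n",
	       (unsigned)(flat[0] >> 4),
	       (unsigned)(flat[2] << 8 | flat[3]));
	free(flat);
	return 0;
}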



* Re: [PATCH net-next v2 1/2] net: mhi-net: Add de-aggregation support
  2021-02-02 16:16 [PATCH net-next v2 1/2] net: mhi-net: Add de-aggregation support Loic Poulain
  2021-02-02 16:16 ` [PATCH net-next v2 2/2] net: qualcomm: rmnet: Fix rx_handler for non-linear skbs Loic Poulain
@ 2021-02-02 22:45 ` Willem de Bruijn
  2021-02-03  7:27   ` Loic Poulain
  1 sibling, 1 reply; 7+ messages in thread
From: Willem de Bruijn @ 2021-02-02 22:45 UTC
  To: Loic Poulain
  Cc: Jakub Kicinski, David Miller, Network Development,
	Sean Tranchetti, Subash Abhinov Kasiviswanathan

On Tue, Feb 2, 2021 at 11:08 AM Loic Poulain <loic.poulain@linaro.org> wrote:
>
> When the device-side MTU is larger than the host-side MTU, packets
> (typically rmnet packets) are split over multiple MHI transfers.
> In that case, the fragments must be re-aggregated to recover the
> packet before forwarding it to the upper layer.
>
> A fragmented packet results in an -EOVERFLOW MHI transaction status
> for each of its fragments, except the final one. Such transfers were
> previously considered errors and the fragments were simply dropped.
>
> This change adds a re-aggregation mechanism using skb chaining, via
> the skb frag_list.
>
> A warning is printed (once) since this behavior usually comes from a
> misconfiguration of the device (e.g. the modem MTU).
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>

Only one real question wrt stats. Otherwise looks good to me, thanks.

> ---
>  v2: use zero-copy skb chaining instead of skb_copy_expand.
>
>  drivers/net/mhi_net.c | 79 ++++++++++++++++++++++++++++++++++++++++++++-------
>  1 file changed, 69 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
> index 4f512531..be39779 100644
> --- a/drivers/net/mhi_net.c
> +++ b/drivers/net/mhi_net.c
> @@ -32,6 +32,8 @@ struct mhi_net_stats {
>  struct mhi_net_dev {
>         struct mhi_device *mdev;
>         struct net_device *ndev;
> +       struct sk_buff *skbagg_head;
> +       struct sk_buff *skbagg_tail;
>         struct delayed_work rx_refill;
>         struct mhi_net_stats stats;
>         u32 rx_queue_sz;
> @@ -132,6 +134,37 @@ static void mhi_net_setup(struct net_device *ndev)
>         ndev->tx_queue_len = 1000;
>  }
>
> +static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
> +                                      struct sk_buff *skb)
> +{
> +       struct sk_buff *head = mhi_netdev->skbagg_head;
> +       struct sk_buff *tail = mhi_netdev->skbagg_tail;
> +
> +       /* This is non-paged skb chaining using frag_list */
> +

no need for empty line?

> +       if (!head) {
> +               mhi_netdev->skbagg_head = skb;
> +               return skb;
> +       }
> +
> +       if (!skb_shinfo(head)->frag_list)
> +               skb_shinfo(head)->frag_list = skb;
> +       else
> +               tail->next = skb;
> +
> +       /* data_len is normally the size of paged data, in our case there is no

data_len is defined as the data excluding the linear len (ref:
skb_headlen). That is not just paged data, but includes frag_list.

> +        * paged data (nr_frags = 0), so it represents the size of chained skbs,
> +        * This way, net core will consider skb->frag_list.
> +        */
> +       head->len += skb->len;
> +       head->data_len += skb->len;
> +       head->truesize += skb->truesize;
> +
> +       mhi_netdev->skbagg_tail = skb;
> +
> +       return mhi_netdev->skbagg_head;
> +}
> +
>  static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
>                                 struct mhi_result *mhi_res)
>  {
> @@ -142,19 +175,42 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
>         free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
>
>         if (unlikely(mhi_res->transaction_status)) {
> -               dev_kfree_skb_any(skb);
> -
> -               /* MHI layer stopping/resetting the DL channel */
> -               if (mhi_res->transaction_status == -ENOTCONN)
> +               switch (mhi_res->transaction_status) {
> +               case -EOVERFLOW:
> +                       /* Packet can not fit in one MHI buffer and has been
> +                        * split over multiple MHI transfers, do re-aggregation.
> +                        * That usually means the device side MTU is larger than
> +                        * the host side MTU/MRU. Since this is not optimal,
> +                        * print a warning (once).
> +                        */
> +                       netdev_warn_once(mhi_netdev->ndev,
> +                                        "Fragmented packets received, fix MTU?\n");
> +                       skb_put(skb, mhi_res->bytes_xferd);
> +                       mhi_net_skb_agg(mhi_netdev, skb);
> +                       break;
> +               case -ENOTCONN:
> +                       /* MHI layer stopping/resetting the DL channel */
> +                       dev_kfree_skb_any(skb);
>                         return;
> -
> -               u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
> -               u64_stats_inc(&mhi_netdev->stats.rx_errors);
> -               u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
> +               default:
> +                       /* Unknown error, simply drop */
> +                       dev_kfree_skb_any(skb);
> +                       u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
> +                       u64_stats_inc(&mhi_netdev->stats.rx_errors);
> +                       u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
> +               }
>         } else {
> +               skb_put(skb, mhi_res->bytes_xferd);
> +
> +               if (mhi_netdev->skbagg_head) {
> +                       /* Aggregate the final fragment */
> +                       skb = mhi_net_skb_agg(mhi_netdev, skb);
> +                       mhi_netdev->skbagg_head = NULL;
> +               }
> +
>                 u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
>                 u64_stats_inc(&mhi_netdev->stats.rx_packets);
> -               u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
> +               u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);

might this change stats? it will if skb->len != 0 before skb_put. Even
if so, perhaps it doesn't matter.


* Re: [PATCH net-next v2 2/2] net: qualcomm: rmnet: Fix rx_handler for non-linear skbs
  2021-02-02 16:16 ` [PATCH net-next v2 2/2] net: qualcomm: rmnet: Fix rx_handler for non-linear skbs Loic Poulain
@ 2021-02-02 22:46   ` Willem de Bruijn
  2021-02-03  0:57   ` subashab
  1 sibling, 0 replies; 7+ messages in thread
From: Willem de Bruijn @ 2021-02-02 22:46 UTC
  To: Loic Poulain
  Cc: Jakub Kicinski, David Miller, Network Development,
	Sean Tranchetti, Subash Abhinov Kasiviswanathan

On Tue, Feb 2, 2021 at 11:08 AM Loic Poulain <loic.poulain@linaro.org> wrote:
>
> There is no guarantee that the rmnet rx_handler is only fed linear
> skbs, but the current rmnet implementation does not check that,
> leading to crashes when non-linear skbs are processed as linear ones.
>
> Fix that by ensuring skb linearization before processing.
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>

Acked-by: Willem de Bruijn <willemb@google.com>


* Re: [PATCH net-next v2 2/2] net: qualcomm: rmnet: Fix rx_handler for non-linear skbs
  2021-02-02 16:16 ` [PATCH net-next v2 2/2] net: qualcomm: rmnet: Fix rx_handler for non-linear skbs Loic Poulain
  2021-02-02 22:46   ` Willem de Bruijn
@ 2021-02-03  0:57   ` subashab
  1 sibling, 0 replies; 7+ messages in thread
From: subashab @ 2021-02-03  0:57 UTC
  To: Loic Poulain; +Cc: kuba, davem, willemdebruijn.kernel, netdev, stranche

On 2021-02-02 09:16, Loic Poulain wrote:
> There is no guarantee that the rmnet rx_handler is only fed linear
> skbs, but the current rmnet implementation does not check that,
> leading to crashes when non-linear skbs are processed as linear ones.
>
> Fix that by ensuring skb linearization before processing.
> 
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
> ---
>  v2: Add this patch to the series to prevent crash
> 
>  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
> b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
> index 3d7d3ab..2776c32 100644
> --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
> +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
> @@ -180,7 +180,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff
> **pskb)
>  	struct rmnet_port *port;
>  	struct net_device *dev;
> 
> -	if (!skb)
> +	if (!skb || skb_linearize(skb))
>  		goto done;
> 
>  	if (skb->pkt_type == PACKET_LOOPBACK)

Reviewed-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>


* Re: [PATCH net-next v2 1/2] net: mhi-net: Add de-aggregation support
  2021-02-02 22:45 ` [PATCH net-next v2 1/2] net: mhi-net: Add de-aggregation support Willem de Bruijn
@ 2021-02-03  7:27   ` Loic Poulain
  2021-02-03 14:05     ` Willem de Bruijn
  0 siblings, 1 reply; 7+ messages in thread
From: Loic Poulain @ 2021-02-03  7:27 UTC
  To: Willem de Bruijn
  Cc: Jakub Kicinski, David Miller, Network Development,
	Sean Tranchetti, Subash Abhinov Kasiviswanathan

Hi Willem,

On Tue, 2 Feb 2021 at 23:45, Willem de Bruijn
<willemdebruijn.kernel@gmail.com> wrote:
>
> On Tue, Feb 2, 2021 at 11:08 AM Loic Poulain <loic.poulain@linaro.org> wrote:
> >
> > When the device-side MTU is larger than the host-side MTU, packets
> > (typically rmnet packets) are split over multiple MHI transfers.
> > In that case, the fragments must be re-aggregated to recover the
> > packet before forwarding it to the upper layer.
> >
> > A fragmented packet results in an -EOVERFLOW MHI transaction status
> > for each of its fragments, except the final one. Such transfers were
> > previously considered errors and the fragments were simply dropped.
[...]
> > +static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
> > +                                      struct sk_buff *skb)
> > +{
> > +       struct sk_buff *head = mhi_netdev->skbagg_head;
> > +       struct sk_buff *tail = mhi_netdev->skbagg_tail;
> > +
> > +       /* This is non-paged skb chaining using frag_list */
> > +
>
> no need for empty line?
>
> > +       if (!head) {
> > +               mhi_netdev->skbagg_head = skb;
> > +               return skb;
> > +       }
> > +
> > +       if (!skb_shinfo(head)->frag_list)
> > +               skb_shinfo(head)->frag_list = skb;
> > +       else
> > +               tail->next = skb;
> > +
> > +       /* data_len is normally the size of paged data, in our case there is no
>
> data_len is defined as the data excluding the linear len (ref:
> skb_headlen). That is not just paged data, but includes frag_list.

Ok, thanks for clarifying this. I'll remove the comment then, since
this is a valid usage.

[...]
> >  static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
> >                                 struct mhi_result *mhi_res)
> >  {
> > @@ -142,19 +175,42 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
> >         free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
> >
> >         if (unlikely(mhi_res->transaction_status)) {
> > -               dev_kfree_skb_any(skb);
> > -
> > -               /* MHI layer stopping/resetting the DL channel */
> > -               if (mhi_res->transaction_status == -ENOTCONN)
> > +               switch (mhi_res->transaction_status) {
> > +               case -EOVERFLOW:
> > +                       /* Packet can not fit in one MHI buffer and has been
> > +                        * split over multiple MHI transfers, do re-aggregation.
> > +                        * That usually means the device side MTU is larger than
> > +                        * the host side MTU/MRU. Since this is not optimal,
> > +                        * print a warning (once).
> > +                        */
> > +                       netdev_warn_once(mhi_netdev->ndev,
> > +                                        "Fragmented packets received, fix MTU?\n");
> > +                       skb_put(skb, mhi_res->bytes_xferd);
> > +                       mhi_net_skb_agg(mhi_netdev, skb);
> > +                       break;
> > +               case -ENOTCONN:
> > +                       /* MHI layer stopping/resetting the DL channel */
> > +                       dev_kfree_skb_any(skb);
> >                         return;
> > -
> > -               u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
> > -               u64_stats_inc(&mhi_netdev->stats.rx_errors);
> > -               u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
> > +               default:
> > +                       /* Unknown error, simply drop */
> > +                       dev_kfree_skb_any(skb);
> > +                       u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
> > +                       u64_stats_inc(&mhi_netdev->stats.rx_errors);
> > +                       u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
> > +               }
> >         } else {
> > +               skb_put(skb, mhi_res->bytes_xferd);
> > +
> > +               if (mhi_netdev->skbagg_head) {
> > +                       /* Aggregate the final fragment */
> > +                       skb = mhi_net_skb_agg(mhi_netdev, skb);
> > +                       mhi_netdev->skbagg_head = NULL;
> > +               }
> > +
> >                 u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
> >                 u64_stats_inc(&mhi_netdev->stats.rx_packets);
> > -               u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
> > +               u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
>
> might this change stats? it will if skb->len != 0 before skb_put. Even
> if so, perhaps it doesn't matter.

I don't get that point; skb is the received MHI buffer, and we simply
set its size because the MHI core doesn't (skb->len is always 0 before
the put). Then, if it is part of a fragmented transfer, we just do the
extra 'skb = skb_agg + skb', so skb->len should always be right here,
whether it's a standalone/linear packet or a multi-fragment packet.
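
For instance (hypothetical sizes): a 3742-byte packet received over
1500-byte MHI buffers arrives as 1500 + 1500 + 742 byte fragments;
bytes_xferd on the final transfer is only 742, while the aggregated
skb->len is 3742, which is what rx_bytes should account for.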

Regards,
Loic


* Re: [PATCH net-next v2 1/2] net: mhi-net: Add de-aggregation support
  2021-02-03  7:27   ` Loic Poulain
@ 2021-02-03 14:05     ` Willem de Bruijn
  0 siblings, 0 replies; 7+ messages in thread
From: Willem de Bruijn @ 2021-02-03 14:05 UTC
  To: Loic Poulain
  Cc: Willem de Bruijn, Jakub Kicinski, David Miller,
	Network Development, Sean Tranchetti,
	Subash Abhinov Kasiviswanathan

>
> [...]
> > >  static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
> > >                                 struct mhi_result *mhi_res)
> > >  {
> > > @@ -142,19 +175,42 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
> > >         free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
> > >
> > >         if (unlikely(mhi_res->transaction_status)) {
> > > -               dev_kfree_skb_any(skb);
> > > -
> > > -               /* MHI layer stopping/resetting the DL channel */
> > > -               if (mhi_res->transaction_status == -ENOTCONN)
> > > +               switch (mhi_res->transaction_status) {
> > > +               case -EOVERFLOW:
> > > +                       /* Packet can not fit in one MHI buffer and has been
> > > +                        * split over multiple MHI transfers, do re-aggregation.
> > > +                        * That usually means the device side MTU is larger than
> > > +                        * the host side MTU/MRU. Since this is not optimal,
> > > +                        * print a warning (once).
> > > +                        */
> > > +                       netdev_warn_once(mhi_netdev->ndev,
> > > +                                        "Fragmented packets received, fix MTU?\n");
> > > +                       skb_put(skb, mhi_res->bytes_xferd);
> > > +                       mhi_net_skb_agg(mhi_netdev, skb);
> > > +                       break;
> > > +               case -ENOTCONN:
> > > +                       /* MHI layer stopping/resetting the DL channel */
> > > +                       dev_kfree_skb_any(skb);
> > >                         return;
> > > -
> > > -               u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
> > > -               u64_stats_inc(&mhi_netdev->stats.rx_errors);
> > > -               u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
> > > +               default:
> > > +                       /* Unknown error, simply drop */
> > > +                       dev_kfree_skb_any(skb);
> > > +                       u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
> > > +                       u64_stats_inc(&mhi_netdev->stats.rx_errors);
> > > +                       u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
> > > +               }
> > >         } else {
> > > +               skb_put(skb, mhi_res->bytes_xferd);
> > > +
> > > +               if (mhi_netdev->skbagg_head) {
> > > +                       /* Aggregate the final fragment */
> > > +                       skb = mhi_net_skb_agg(mhi_netdev, skb);
> > > +                       mhi_netdev->skbagg_head = NULL;
> > > +               }
> > > +
> > >                 u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
> > >                 u64_stats_inc(&mhi_netdev->stats.rx_packets);
> > > -               u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
> > > +               u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
> >
> > might this change stats? it will if skb->len != 0 before skb_put. Even
> > if so, perhaps it doesn't matter.
>
> I don't get that point; skb is the received MHI buffer, and we simply
> set its size because the MHI core doesn't (skb->len is always 0 before
> the put). Then, if it is part of a fragmented transfer, we just do the
> extra 'skb = skb_agg + skb', so skb->len should always be right here,
> whether it's a standalone/linear packet or a multi-fragment packet.

Great. I did not know that skb->len is 0 before the put on this code
path. It isn't for other protocols, where any protocol headers would
also have been counted.

