* [PATCH net-next] net: mhi-net: Add de-aggregation support
@ 2021-01-25 15:45 Loic Poulain
From: Loic Poulain @ 2021-01-25 15:45 UTC (permalink / raw)
  To: kuba, davem; +Cc: netdev, Loic Poulain

When the device-side MTU is larger than the host-side MRU, packets
(typically rmnet packets) are split over multiple MHI transfers.
In that case, the fragments must be re-aggregated to recover the
packet before forwarding it to the upper layer.

A fragmented packet results in an -EOVERFLOW MHI transaction status
for each of its fragments, except the final one. Such transfers were
previously considered errors and the fragments were simply dropped.

This patch implements the re-aggregation mechanism, allowing the
original packet to be recovered. It also prints a warning (once),
since this behavior usually comes from a misconfiguration of the
device (modem).

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
---
 drivers/net/mhi_net.c | 74 ++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 64 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index a5a214d..780086f 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -34,6 +34,7 @@ struct mhi_net_dev {
 	struct mhi_device *mdev;
 	struct net_device *ndev;
 	struct delayed_work rx_refill;
+	struct sk_buff *skbagg;
 	struct mhi_net_stats stats;
 	u32 rx_queue_sz;
 };
@@ -133,6 +134,31 @@ static void mhi_net_setup(struct net_device *ndev)
 	ndev->tx_queue_len = 1000;
 }
 
+static struct sk_buff *mhi_net_skb_append(struct mhi_device *mhi_dev,
+					  struct sk_buff *skb1,
+					  struct sk_buff *skb2)
+{
+	struct sk_buff *new_skb;
+
+	/* This is the first fragment */
+	if (!skb1)
+		return skb2;
+
+	/* Expand packet */
+	new_skb = skb_copy_expand(skb1, 0, skb2->len, GFP_ATOMIC);
+	dev_kfree_skb_any(skb1);
+	if (!new_skb)
+		return skb2;
+
+	/* Append to expanded packet */
+	memcpy(skb_put(new_skb, skb2->len), skb2->data, skb2->len);
+
+	/* free appended skb */
+	dev_kfree_skb_any(skb2);
+
+	return new_skb;
+}
+
 static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 				struct mhi_result *mhi_res)
 {
@@ -143,19 +169,44 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 	remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
 
 	if (unlikely(mhi_res->transaction_status)) {
-		dev_kfree_skb_any(skb);
-
-		/* MHI layer stopping/resetting the DL channel */
-		if (mhi_res->transaction_status == -ENOTCONN)
+		switch (mhi_res->transaction_status) {
+		case -EOVERFLOW:
+			/* Packet can not fit in one MHI buffer and has been
+			 * split over multiple MHI transfers, do re-aggregation.
+			 * That usually means the device side MTU is larger than
+			 * the host side MTU/MRU. Since this is not optimal,
+			 * print a warning (once).
+			 */
+			netdev_warn_once(mhi_netdev->ndev,
+					 "Fragmented packets received, fix MTU?\n");
+			skb_put(skb, mhi_res->bytes_xferd);
+			mhi_netdev->skbagg = mhi_net_skb_append(mhi_dev,
+								mhi_netdev->skbagg,
+								skb);
+			break;
+		case -ENOTCONN:
+			/* MHI layer stopping/resetting the DL channel */
+			dev_kfree_skb_any(skb);
 			return;
-
-		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
-		u64_stats_inc(&mhi_netdev->stats.rx_errors);
-		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+		default:
+			/* Unknown error, simply drop */
+			dev_kfree_skb_any(skb);
+			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+			u64_stats_inc(&mhi_netdev->stats.rx_errors);
+			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+		}
 	} else {
+		skb_put(skb, mhi_res->bytes_xferd);
+
+		if (mhi_netdev->skbagg) {
+			/* Aggregate the final fragment */
+			skb = mhi_net_skb_append(mhi_dev, mhi_netdev->skbagg, skb);
+			mhi_netdev->skbagg = NULL;
+		}
+
 		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
 		u64_stats_inc(&mhi_netdev->stats.rx_packets);
-		u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
+		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
 		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
 
 		switch (skb->data[0] & 0xf0) {
@@ -170,7 +221,6 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 			break;
 		}
 
-		skb_put(skb, mhi_res->bytes_xferd);
 		netif_rx(skb);
 	}
 
@@ -270,6 +320,7 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
 	dev_set_drvdata(dev, mhi_netdev);
 	mhi_netdev->ndev = ndev;
 	mhi_netdev->mdev = mhi_dev;
+	mhi_netdev->skbagg = NULL;
 	SET_NETDEV_DEV(ndev, &mhi_dev->dev);
 	SET_NETDEV_DEVTYPE(ndev, &wwan_type);
 
@@ -304,6 +355,9 @@ static void mhi_net_remove(struct mhi_device *mhi_dev)
 
 	mhi_unprepare_from_transfer(mhi_netdev->mdev);
 
+	if (mhi_netdev->skbagg)
+		kfree_skb(mhi_netdev->skbagg);
+
 	free_netdev(mhi_netdev->ndev);
 }
 
-- 
2.7.4
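
For readers unfamiliar with the MHI callback flow, here is a minimal
userspace sketch of the re-aggregation logic the patch adds, with a
plain byte buffer standing in for sk_buffs. All names here (struct agg,
agg_append, rx_callback) are hypothetical; only the control flow mirrors
the patch: intermediate fragments arrive with a negative status
(-EOVERFLOW in the driver) and are appended to a pending aggregate,
while a successful transfer marks the final fragment and delivers the
reassembled packet.

#include <stdlib.h>
#include <string.h>

struct agg {
	unsigned char *data;
	size_t len;
};

/* Append one fragment to the pending aggregate (the role played by
 * mhi_net_skb_append in the patch).
 */
static int agg_append(struct agg *agg, const unsigned char *frag, size_t len)
{
	unsigned char *grown = realloc(agg->data, agg->len + len);

	if (!grown)
		return -1;
	memcpy(grown + agg->len, frag, len);
	agg->data = grown;
	agg->len += len;
	return 0;
}

/* Per-transfer callback: status < 0 means "more fragments follow";
 * status == 0 means this transfer completes the packet.
 */
static void rx_callback(struct agg *agg, const unsigned char *buf,
			size_t len, int status)
{
	if (agg_append(agg, buf, len))
		return;		/* Allocation failure: drop in this sketch */

	if (status == 0) {
		/* deliver(agg->data, agg->len); */
		free(agg->data);
		agg->data = NULL;
		agg->len = 0;
	}
}

In the real driver the aggregate lives in mhi_net_dev::skbagg across
callbacks, which is why the probe path initializes it to NULL and the
remove path frees any partially aggregated packet.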

