* [PATCH v7 1/2] bus: mhi: Add mhi_queue_is_full function
@ 2020-10-27 8:39 Loic Poulain
2020-10-27 8:39 ` [PATCH v7 2/2] net: Add mhi-net driver Loic Poulain
2020-10-27 21:03 ` [PATCH v7 1/2] bus: mhi: Add mhi_queue_is_full function Willem de Bruijn
From: Loic Poulain @ 2020-10-27 8:39 UTC
To: kuba, davem, manivannan.sadhasivam, hemantk
Cc: netdev, linux-arm-msm, jhugo, bbhatt, Loic Poulain
This function can be used by a client driver to determine whether it's
possible to queue new elements in a channel ring.
Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
---
v1->v5: not part of the series
v6: Add this commit, used for stopping TX queue
v7: no change
drivers/bus/mhi/core/main.c | 15 +++++++++++++--
include/linux/mhi.h | 7 +++++++
2 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index a588eac..44aa11f 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -943,8 +943,8 @@ void mhi_ctrl_ev_task(unsigned long data)
}
}
-static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
- struct mhi_ring *ring)
+static inline bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
+ struct mhi_ring *ring)
{
void *tmp = ring->wp + ring->el_size;
@@ -1173,6 +1173,17 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+ mhi_dev->ul_chan : mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ return mhi_is_ring_full(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_is_full);
+
int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan,
enum mhi_cmd_type cmd)
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 9d67e75..f72c3a4 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -745,4 +745,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct sk_buff *skb, size_t len, enum mhi_flags mflags);
+/**
+ * mhi_queue_is_full - Determine whether queueing new elements is possible
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ */
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir);
+
#endif /* _MHI_H_ */
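For reference, a usage sketch (not part of this patch): a client driver
is expected to pair the new helper with netif_stop_queue() in its xmit
path, as the mhi-net driver does in patch 2/2. The function name and
private struct below are illustrative, untested code.

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);	/* hypothetical priv */

	/* Drop the packet on queueing error, as mhi-net does */
	if (mhi_queue_skb(priv->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT))
		dev_kfree_skb_any(skb);

	/* Stop the netdev queue while the channel ring has no room left;
	 * it is woken again from the UL transfer completion callback.
	 */
	if (mhi_queue_is_full(priv->mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}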
--
2.7.4
* [PATCH v7 2/2] net: Add mhi-net driver
2020-10-27 8:39 [PATCH v7 1/2] bus: mhi: Add mhi_queue_is_full function Loic Poulain
@ 2020-10-27 8:39 ` Loic Poulain
2020-10-27 21:21 ` Willem de Bruijn
2020-10-27 21:03 ` [PATCH v7 1/2] bus: mhi: Add mhi_queue_is_full function Willem de Bruijn
From: Loic Poulain @ 2020-10-27 8:39 UTC
To: kuba, davem, manivannan.sadhasivam, hemantk
Cc: netdev, linux-arm-msm, jhugo, bbhatt, Loic Poulain
This patch adds a new network driver implementing MHI transport for
network packets. Packets can be in any format, though QMAP (rmnet)
is the usual protocol (flow control + PDN mux).
It supports two MHI devices: IP_HW0, which is the path to the IPA
(IP accelerator) on Qualcomm modems, and IP_SW0, which is the
software-driven IP path (to the modem CPU).
Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
---
v2: - rebase on net-next
- remove useless skb_linearize
- check error type on mhi_queue return
- rate limited errors
- Schedule RX refill only on 'low' buf level
- SET_NETDEV_DEV in probe
- reorder device remove sequence
v3: - Stop channels on net_register error
- Remove useless parentheses
- Add driver .owner
v4: - Prevent potential cpu hog in rx-refill loop
- Access mtu via READ_ONCE
v5: - Fix access to u64 stats
v6: - Stop TX queue earlier if queue is full
- Prevent 'abnormal' NETDEV_TX_BUSY path
v7: - Stop dl/ul cb operations during channel reset
drivers/net/Kconfig | 7 ++
drivers/net/Makefile | 1 +
drivers/net/mhi_net.c | 320 ++++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 328 insertions(+)
create mode 100644 drivers/net/mhi_net.c
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1368d1d..11a6357 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -426,6 +426,13 @@ config VSOCKMON
mostly intended for developers or support to debug vsock issues. If
unsure, say N.
+config MHI_NET
+ tristate "MHI network driver"
+ depends on MHI_BUS
+ help
+ This is the network driver for MHI. It can be used with
+ QCOM-based WWAN modems (like SDX55). Say Y or M.
+
endif # NET_CORE
config SUNGEM_PHY
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 94b6080..8312037 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
+obj-$(CONFIG_MHI_NET) += mhi_net.o
#
# Networking Drivers
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
new file mode 100644
index 0000000..3c8e258
--- /dev/null
+++ b/drivers/net/mhi_net.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* MHI Network driver - Network over MHI
+ *
+ * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
+ */
+
+#include <linux/if_arp.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+
+#define MIN_MTU ETH_MIN_MTU
+#define MAX_MTU 0xffff
+#define DEFAULT_MTU 8192
+
+struct mhi_net_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t rx_dropped;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ atomic_t rx_queued;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
+};
+
+struct mhi_net_dev {
+ struct mhi_device *mdev;
+ struct net_device *ndev;
+ struct delayed_work rx_refill;
+ struct mhi_net_stats stats;
+ u32 rx_queue_sz;
+};
+
+static int mhi_ndo_open(struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+
+ /* Feed the rx buffer pool */
+ schedule_delayed_work(&mhi_netdev->rx_refill, 0);
+
+ /* Carrier is established via out-of-band channel (e.g. qmi) */
+ netif_carrier_on(ndev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int mhi_ndo_stop(struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+ cancel_delayed_work_sync(&mhi_netdev->rx_refill);
+
+ return 0;
+}
+
+static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ struct mhi_device *mdev = mhi_netdev->mdev;
+ int err;
+
+ /* mhi_queue_skb is not thread-safe, but xmit is serialized by the
+ * network core. Once MHI core will be thread save, migrate to
+ * NETIF_F_LLTX support.
+ */
+ err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
+ ndev->name, err);
+
+ u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.tx_dropped);
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+
+ /* drop the packet */
+ kfree_skb(skb);
+ }
+
+ if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static void mhi_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
+ stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
+ stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
+ stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
+ stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
+ } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
+ stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
+ stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
+ stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
+ stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
+ } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
+}
+
+static const struct net_device_ops mhi_netdev_ops = {
+ .ndo_open = mhi_ndo_open,
+ .ndo_stop = mhi_ndo_stop,
+ .ndo_start_xmit = mhi_ndo_xmit,
+ .ndo_get_stats64 = mhi_ndo_get_stats64,
+};
+
+static void mhi_net_setup(struct net_device *ndev)
+{
+ ndev->header_ops = NULL; /* No header */
+ ndev->type = ARPHRD_NONE; /* QMAP... */
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ ndev->netdev_ops = &mhi_netdev_ops;
+ ndev->mtu = DEFAULT_MTU;
+ ndev->min_mtu = MIN_MTU;
+ ndev->max_mtu = MAX_MTU;
+ ndev->tx_queue_len = 1000;
+}
+
+static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct sk_buff *skb = mhi_res->buf_addr;
+ int remaining;
+
+ remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
+
+ if (unlikely(mhi_res->transaction_status)) {
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+
+ kfree_skb(skb);
+
+ /* MHI layer resetting the DL channel */
+ if (mhi_res->transaction_status == -ENOTCONN)
+ return;
+ } else {
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_packets);
+ u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+
+ skb->protocol = htons(ETH_P_MAP);
+ skb_put(skb, mhi_res->bytes_xferd);
+ netif_rx(skb);
+ }
+
+ /* Refill if RX buffers queue becomes low */
+ if (remaining <= mhi_netdev->rx_queue_sz / 2)
+ schedule_delayed_work(&mhi_netdev->rx_refill, 0);
+}
+
+static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct net_device *ndev = mhi_netdev->ndev;
+ struct sk_buff *skb = mhi_res->buf_addr;
+
+ /* Hardware has consumed the buffer, so free the skb (which is not
+ * freed by the MHI stack) and perform accounting.
+ */
+ consume_skb(skb);
+
+ u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
+ if (unlikely(mhi_res->transaction_status)) {
+ u64_stats_inc(&mhi_netdev->stats.tx_errors);
+
+ /* MHI layer resetting the UL channel */
+ if (mhi_res->transaction_status == -ENOTCONN) {
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+ return;
+ }
+ } else {
+ u64_stats_inc(&mhi_netdev->stats.tx_packets);
+ u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
+ }
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+}
+
+static void mhi_net_rx_refill_work(struct work_struct *work)
+{
+ struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
+ rx_refill.work);
+ struct net_device *ndev = mhi_netdev->ndev;
+ struct mhi_device *mdev = mhi_netdev->mdev;
+ int size = READ_ONCE(ndev->mtu);
+ struct sk_buff *skb;
+ int err;
+
+ do {
+ skb = netdev_alloc_skb(ndev, size);
+ if (unlikely(!skb))
+ break;
+
+ err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
+ if (err) {
+ if (unlikely(err != -ENOMEM))
+ net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
+ ndev->name, err);
+ kfree_skb(skb);
+ break;
+ }
+
+ atomic_inc(&mhi_netdev->stats.rx_queued);
+
+ /* Do not hog the CPU if rx buffers are completed faster than
+ * queued (unlikely).
+ */
+ cond_resched();
+ } while (1);
+
+ /* If we're still starved of rx buffers, reschedule latter */
+ if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
+ schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
+}
+
+static int mhi_net_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ const char *netname = (char *)id->driver_data;
+ struct mhi_net_dev *mhi_netdev;
+ struct net_device *ndev;
+ struct device *dev = &mhi_dev->dev;
+ int err;
+
+ ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE,
+ mhi_net_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ mhi_netdev = netdev_priv(ndev);
+ dev_set_drvdata(dev, mhi_netdev);
+ mhi_netdev->ndev = ndev;
+ mhi_netdev->mdev = mhi_dev;
+ SET_NETDEV_DEV(ndev, &mhi_dev->dev);
+
+ /* All MHI net channels have 128 ring elements (at least for now) */
+ mhi_netdev->rx_queue_sz = 128;
+
+ INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
+ u64_stats_init(&mhi_netdev->stats.rx_syncp);
+ u64_stats_init(&mhi_netdev->stats.tx_syncp);
+
+ /* Start MHI channels */
+ err = mhi_prepare_for_transfer(mhi_dev);
+ if (err)
+ goto out_err;
+
+ err = register_netdev(ndev);
+ if (err) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ free_netdev(ndev);
+ return err;
+}
+
+static void mhi_net_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ unregister_netdev(mhi_netdev->ndev);
+
+ mhi_unprepare_from_transfer(mhi_netdev->mdev);
+
+ free_netdev(mhi_netdev->ndev);
+}
+
+static const struct mhi_device_id mhi_net_id_table[] = {
+ { .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" },
+ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
+ {}
+};
+MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);
+
+static struct mhi_driver mhi_net_driver = {
+ .probe = mhi_net_probe,
+ .remove = mhi_net_remove,
+ .dl_xfer_cb = mhi_net_dl_callback,
+ .ul_xfer_cb = mhi_net_ul_callback,
+ .id_table = mhi_net_id_table,
+ .driver = {
+ .name = "mhi_net",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_mhi_driver(mhi_net_driver);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("Network over MHI");
+MODULE_LICENSE("GPL");
--
2.7.4
* Re: [PATCH v7 1/2] bus: mhi: Add mhi_queue_is_full function
2020-10-27 8:39 [PATCH v7 1/2] bus: mhi: Add mhi_queue_is_full function Loic Poulain
2020-10-27 8:39 ` [PATCH v7 2/2] net: Add mhi-net driver Loic Poulain
@ 2020-10-27 21:03 ` Willem de Bruijn
From: Willem de Bruijn @ 2020-10-27 21:03 UTC
To: Loic Poulain
Cc: Jakub Kicinski, David Miller, manivannan.sadhasivam, hemantk,
Network Development, linux-arm-msm, jhugo, bbhatt
On Tue, Oct 27, 2020 at 4:33 AM Loic Poulain <loic.poulain@linaro.org> wrote:
>
> This function can be used by a client driver to determine whether it's
> possible to queue new elements in a channel ring.
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
> ---
> v1->v5: not part of the series
> v6: Add this commit, used for stopping TX queue
> v7: no change
>
> drivers/bus/mhi/core/main.c | 15 +++++++++++++--
> include/linux/mhi.h | 7 +++++++
> 2 files changed, 20 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
> index a588eac..44aa11f 100644
> --- a/drivers/bus/mhi/core/main.c
> +++ b/drivers/bus/mhi/core/main.c
> @@ -943,8 +943,8 @@ void mhi_ctrl_ev_task(unsigned long data)
> }
> }
>
> -static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
> - struct mhi_ring *ring)
> +static inline bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
> + struct mhi_ring *ring)
The compiler can decide whether to inline or not.
> {
> void *tmp = ring->wp + ring->el_size;
>
> @@ -1173,6 +1173,17 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
> }
> EXPORT_SYMBOL_GPL(mhi_queue_buf);
* Re: [PATCH v7 2/2] net: Add mhi-net driver
2020-10-27 8:39 ` [PATCH v7 2/2] net: Add mhi-net driver Loic Poulain
@ 2020-10-27 21:21 ` Willem de Bruijn
From: Willem de Bruijn @ 2020-10-27 21:21 UTC
To: Loic Poulain
Cc: Jakub Kicinski, David Miller, manivannan.sadhasivam, hemantk,
Network Development, linux-arm-msm, jhugo, bbhatt
On Tue, Oct 27, 2020 at 4:33 AM Loic Poulain <loic.poulain@linaro.org> wrote:
>
> This patch adds a new network driver implementing MHI transport for
> network packets. Packets can be in any format, though QMAP (rmnet)
> is the usual protocol (flow control + PDN mux).
>
> It supports two MHI devices: IP_HW0, which is the path to the IPA
> (IP accelerator) on Qualcomm modems, and IP_SW0, which is the
> software-driven IP path (to the modem CPU).
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
> +static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
> +{
> + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
> + struct mhi_device *mdev = mhi_netdev->mdev;
> + int err;
> +
> + /* mhi_queue_skb is not thread-safe, but xmit is serialized by the
> + * network core. Once MHI core will be thread save, migrate to
nit: thread-safe.
I also wonder whether you'd gain much from converting to
driver-internal locking; perhaps this comment is premature?
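For concreteness, driver-internal locking could look roughly like this
(untested sketch; tx_lock would be a new spinlock_t field in struct
mhi_net_dev, initialized in probe, with NETIF_F_LLTX added to
ndev->features in mhi_net_setup):

static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Serialize mhi_queue_skb() ourselves instead of relying on the
	 * core xmit lock.
	 */
	spin_lock_bh(&mhi_netdev->tx_lock);
	if (mhi_queue_skb(mhi_netdev->mdev, DMA_TO_DEVICE, skb,
			  skb->len, MHI_EOT))
		kfree_skb(skb);
	spin_unlock_bh(&mhi_netdev->tx_lock);

	return NETDEV_TX_OK;
}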
> +static void mhi_net_rx_refill_work(struct work_struct *work)
> +{
> + struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
> + rx_refill.work);
> + struct net_device *ndev = mhi_netdev->ndev;
> + struct mhi_device *mdev = mhi_netdev->mdev;
> + int size = READ_ONCE(ndev->mtu);
> + struct sk_buff *skb;
> + int err;
> +
> + do {
> + skb = netdev_alloc_skb(ndev, size);
> + if (unlikely(!skb))
> + break;
> +
> + err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
> + if (err) {
> + if (unlikely(err != -ENOMEM))
> + net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
> + ndev->name, err);
> + kfree_skb(skb);
> + break;
> + }
> +
> + atomic_inc(&mhi_netdev->stats.rx_queued);
> +
> + /* Do not hog the CPU if rx buffers are completed faster than
> + * queued (unlikely).
> + */
> + cond_resched();
> + } while (1);
This function has to allocate and then kfree_skb one skb too many just
to break out of the loop. Can it not observe that the queue is full
based on rx_queued?
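Something like this, i.e. bounding the loop on the known ring size
(untested sketch reusing the driver's own fields):

	while (atomic_read(&mhi_netdev->stats.rx_queued) <
	       mhi_netdev->rx_queue_sz) {
		struct sk_buff *skb = netdev_alloc_skb(ndev, size);

		if (unlikely(!skb))
			break;

		if (mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT)) {
			kfree_skb(skb);
			break;
		}

		atomic_inc(&mhi_netdev->stats.rx_queued);

		/* Do not hog the CPU */
		cond_resched();
	}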
> +
> + /* If we're still starved of rx buffers, reschedule latter */
nit: later
> + if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
> + schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
what is the rationale for HZ / 2?
> +}
> +
> +static int mhi_net_probe(struct mhi_device *mhi_dev,
> + const struct mhi_device_id *id)
> +{
> + const char *netname = (char *)id->driver_data;
> + struct mhi_net_dev *mhi_netdev;
> + struct net_device *ndev;
> + struct device *dev = &mhi_dev->dev;
> + int err;
> +
> + ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE,
> + mhi_net_setup);
> + if (!ndev)
> + return -ENOMEM;
> +
> + mhi_netdev = netdev_priv(ndev);
> + dev_set_drvdata(dev, mhi_netdev);
> + mhi_netdev->ndev = ndev;
> + mhi_netdev->mdev = mhi_dev;
> + SET_NETDEV_DEV(ndev, &mhi_dev->dev);
> +
> + /* All MHI net channels have 128 ring elements (at least for now) */
> + mhi_netdev->rx_queue_sz = 128;
> +
> + INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
> + u64_stats_init(&mhi_netdev->stats.rx_syncp);
> + u64_stats_init(&mhi_netdev->stats.tx_syncp);
> +
> + /* Start MHI channels */
> + err = mhi_prepare_for_transfer(mhi_dev);
> + if (err)
> + goto out_err;
> +
> + err = register_netdev(ndev);
> + if (err) {
> + mhi_unprepare_from_transfer(mhi_dev);
> + goto out_err;
It's a bit odd to combine error branches with jump labels. I'd add an
out_register_err: label instead; see the sketch after the quoted code.
> + }
> +
> + return 0;
> +
> +out_err:
> + free_netdev(ndev);
> + return err;
> +}
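That is, something along these lines (untested, using the label name
suggested above):

	err = register_netdev(ndev);
	if (err)
		goto out_register_err;

	return 0;

out_register_err:
	mhi_unprepare_from_transfer(mhi_dev);
out_err:
	free_netdev(ndev);
	return err;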