* [RFC] net: add support for threaded NAPI polling
From: Felix Fietkau @ 2020-07-26 16:31 UTC
  To: netdev; +Cc: Hillf Danton

For some drivers (especially 802.11 drivers), doing a lot of work in the NAPI
poll function does not perform well. Since NAPI poll is bound to the CPU it
was scheduled from, we can easily end up with a few very busy CPUs spending
most of their time in softirq/ksoftirqd and some idle ones.

Introduce threaded NAPI for such drivers based on a workqueue. The API is the
same except for using netif_threaded_napi_add instead of netif_napi_add.
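
For illustration, opting in from a driver looks roughly like this
(hypothetical priv/mydrv_poll names, sketch only):

	/* was: netif_napi_add(dev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT); */
	netif_threaded_napi_add(dev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);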

In my tests with mt76 on MT7621 using threaded NAPI + a thread for tx scheduling
improves LAN->WLAN bridging throughput by 10-50%. Throughput without threaded
NAPI is wildly inconsistent, depending on the CPU that runs the tx scheduling
thread.

With threaded NAPI, throughput seems stable and consistent (and higher than
the best results I got without it).

Based on a patch by Hillf Danton

Cc: Hillf Danton <hdanton@sina.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 include/linux/netdevice.h | 23 ++++++++++++++++++++++
 net/core/dev.c            | 40 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ac2cd3f49aba..3a39211c7598 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -347,6 +347,7 @@ struct napi_struct {
 	struct list_head	dev_list;
 	struct hlist_node	napi_hash_node;
 	unsigned int		napi_id;
+	struct work_struct	work;
 };
 
 enum {
@@ -357,6 +358,7 @@ enum {
 	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
 	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
 	NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
+	NAPI_STATE_THREADED,	/* Use threaded NAPI */
 };
 
 enum {
@@ -367,6 +369,7 @@ enum {
 	NAPIF_STATE_HASHED	 = BIT(NAPI_STATE_HASHED),
 	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
 	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
+	NAPIF_STATE_THREADED	 = BIT(NAPI_STATE_THREADED),
 };
 
 enum gro_result {
@@ -2315,6 +2318,26 @@ static inline void *netdev_priv(const struct net_device *dev)
 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 		    int (*poll)(struct napi_struct *, int), int weight);
 
+/**
+ *	netif_threaded_napi_add - initialize a NAPI context
+ *	@dev:  network device
+ *	@napi: NAPI context
+ *	@poll: polling function
+ *	@weight: default weight
+ *
+ * This variant of netif_napi_add() should be used from drivers using NAPI
+ * with CPU intensive poll functions.
+ * This will schedule polling from a high priority workqueue that is not bound to a specific CPU.
+ */
+static inline void netif_threaded_napi_add(struct net_device *dev,
+					   struct napi_struct *napi,
+					   int (*poll)(struct napi_struct *, int),
+					   int weight)
+{
+	set_bit(NAPI_STATE_THREADED, &napi->state);
+	netif_napi_add(dev, napi, poll, weight);
+}
+
 /**
  *	netif_tx_napi_add - initialize a NAPI context
  *	@dev:  network device
diff --git a/net/core/dev.c b/net/core/dev.c
index 19f1abc26fcd..e140b6a9d5eb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -158,6 +158,7 @@ static DEFINE_SPINLOCK(offload_lock);
 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 struct list_head ptype_all __read_mostly;	/* Taps */
 static struct list_head offload_base __read_mostly;
+static struct workqueue_struct *napi_workq __read_mostly;
 
 static int netif_rx_internal(struct sk_buff *skb);
 static int call_netdevice_notifiers_info(unsigned long val,
@@ -6286,6 +6287,11 @@ void __napi_schedule(struct napi_struct *n)
 {
 	unsigned long flags;
 
+	if (test_bit(NAPI_STATE_THREADED, &n->state)) {
+		queue_work(napi_workq, &n->work);
+		return;
+	}
+
 	local_irq_save(flags);
 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 	local_irq_restore(flags);
@@ -6333,6 +6339,11 @@ EXPORT_SYMBOL(napi_schedule_prep);
  */
 void __napi_schedule_irqoff(struct napi_struct *n)
 {
+	if (test_bit(NAPI_STATE_THREADED, &n->state)) {
+		queue_work(napi_workq, &n->work);
+		return;
+	}
+
 	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 }
 EXPORT_SYMBOL(__napi_schedule_irqoff);
@@ -6601,6 +6612,30 @@ static void init_gro_hash(struct napi_struct *napi)
 	napi->gro_bitmask = 0;
 }
 
+static void napi_workfn(struct work_struct *work)
+{
+	struct napi_struct *n = container_of(work, struct napi_struct, work);
+
+	for (;;) {
+		if (!test_bit(NAPI_STATE_SCHED, &n->state))
+			return;
+
+		if (n->poll(n, n->weight) < n->weight)
+			return;
+
+		if (!need_resched())
+			continue;
+
+		/*
+		 * have to pay for the latency of task switch even if
+		 * napi is scheduled
+		 */
+		if (test_bit(NAPI_STATE_SCHED, &n->state))
+			queue_work(napi_workq, work);
+		return;
+	}
+}
+
 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 		    int (*poll)(struct napi_struct *, int), int weight)
 {
@@ -6621,6 +6656,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 #ifdef CONFIG_NETPOLL
 	napi->poll_owner = -1;
 #endif
+	INIT_WORK(&napi->work, napi_workfn);
 	set_bit(NAPI_STATE_SCHED, &napi->state);
 	napi_hash_add(napi);
 }
@@ -10676,6 +10712,10 @@ static int __init net_dev_init(void)
 		sd->backlog.weight = weight_p;
 	}
 
+	napi_workq = alloc_workqueue("napi_workq", WQ_UNBOUND | WQ_HIGHPRI,
+				     WQ_UNBOUND_MAX_ACTIVE);
+	BUG_ON(!napi_workq);
+
 	dev_boot_phase = 0;
 
 	/* The loopback device is special if any other network devices
-- 
2.24.0



* Re: [RFC] net: add support for threaded NAPI polling
From: Eric Dumazet @ 2020-07-26 16:49 UTC
  To: Felix Fietkau, netdev; +Cc: Hillf Danton



On 7/26/20 9:31 AM, Felix Fietkau wrote:
> For some drivers (especially 802.11 drivers), doing a lot of work in the NAPI
> poll function does not perform well. Since NAPI poll is bound to the CPU it
> was scheduled from, we can easily end up with a few very busy CPUs spending
> most of their time in softirq/ksoftirqd and some idle ones.
> 
> Introduce threaded NAPI for such drivers based on a workqueue. The API is the
> same except for using netif_threaded_napi_add instead of netif_napi_add.
> 
> In my tests with mt76 on MT7621 using threaded NAPI + a thread for tx scheduling
> improves LAN->WLAN bridging throughput by 10-50%. Throughput without threaded
> NAPI is wildly inconsistent, depending on the CPU that runs the tx scheduling
> thread.
> 
> With threaded NAPI, throughput seems stable and consistent (and higher than
> the best results I got without it).

Note that even with a threaded NAPI, you will not be able to use more than one cpu
to process the traffic.

Also, I wonder how this will scale to more than one device using it?

Say we need 4 NAPIs: how will the different work queues mix together?

Years ago we invented RPS and RFS to spread incoming traffic across more
CPUs for devices that have a single hardware queue.
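
(For reference: RPS is enabled per rx queue by writing a CPU bitmask to
/sys/class/net/<dev>/queues/rx-N/rps_cpus, e.g. "f" to let that queue's
packets be processed on CPUs 0-3.)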


> 
> Based on a patch by Hillf Danton
> 
> Cc: Hillf Danton <hdanton@sina.com>
> Signed-off-by: Felix Fietkau <nbd@nbd.name>
> ---
>  include/linux/netdevice.h | 23 ++++++++++++++++++++++
>  net/core/dev.c            | 40 +++++++++++++++++++++++++++++++++++++++
>  2 files changed, 63 insertions(+)
> 
> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
> index ac2cd3f49aba..3a39211c7598 100644
> --- a/include/linux/netdevice.h
> +++ b/include/linux/netdevice.h
> @@ -347,6 +347,7 @@ struct napi_struct {
>  	struct list_head	dev_list;
>  	struct hlist_node	napi_hash_node;
>  	unsigned int		napi_id;
> +	struct work_struct	work;
>  };
>  
>  enum {
> @@ -357,6 +358,7 @@ enum {
>  	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
>  	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
>  	NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
> +	NAPI_STATE_THREADED,	/* Use threaded NAPI */
>  };
>  
>  enum {
> @@ -367,6 +369,7 @@ enum {
>  	NAPIF_STATE_HASHED	 = BIT(NAPI_STATE_HASHED),
>  	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
>  	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
> +	NAPIF_STATE_THREADED	 = BIT(NAPI_STATE_THREADED),
>  };
>  
>  enum gro_result {
> @@ -2315,6 +2318,26 @@ static inline void *netdev_priv(const struct net_device *dev)
>  void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
>  		    int (*poll)(struct napi_struct *, int), int weight);
>  
> +/**
> + *	netif_threaded_napi_add - initialize a NAPI context
> + *	@dev:  network device
> + *	@napi: NAPI context
> + *	@poll: polling function
> + *	@weight: default weight
> + *
> + * This variant of netif_napi_add() should be used from drivers using NAPI
> + * with CPU intensive poll functions.
> + * This will schedule polling from a high priority workqueue that is not bound to a specific CPU.
> + */
> +static inline void netif_threaded_napi_add(struct net_device *dev,
> +					   struct napi_struct *napi,
> +					   int (*poll)(struct napi_struct *, int),
> +					   int weight)
> +{
> +	set_bit(NAPI_STATE_THREADED, &napi->state);
> +	netif_napi_add(dev, napi, poll, weight);
> +}
> +
>  /**
>   *	netif_tx_napi_add - initialize a NAPI context
>   *	@dev:  network device
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 19f1abc26fcd..e140b6a9d5eb 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -158,6 +158,7 @@ static DEFINE_SPINLOCK(offload_lock);
>  struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
>  struct list_head ptype_all __read_mostly;	/* Taps */
>  static struct list_head offload_base __read_mostly;
> +static struct workqueue_struct *napi_workq __read_mostly;
>  
>  static int netif_rx_internal(struct sk_buff *skb);
>  static int call_netdevice_notifiers_info(unsigned long val,
> @@ -6286,6 +6287,11 @@ void __napi_schedule(struct napi_struct *n)
>  {
>  	unsigned long flags;
>  
> +	if (test_bit(NAPI_STATE_THREADED, &n->state)) {
> +		queue_work(napi_workq, &n->work);
> +		return;
> +	}
> +
>  	local_irq_save(flags);
>  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
>  	local_irq_restore(flags);
> @@ -6333,6 +6339,11 @@ EXPORT_SYMBOL(napi_schedule_prep);
>   */
>  void __napi_schedule_irqoff(struct napi_struct *n)
>  {
> +	if (test_bit(NAPI_STATE_THREADED, &n->state)) {
> +		queue_work(napi_workq, &n->work);
> +		return;
> +	}

I do not believe we want to add yet another test in this fast path.

Presumably drivers willing to use threaded NAPI can use a different interface
and directly call queue_work(), without testing NAPI_STATE_THREADED.
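
Something like the following, as a sketch of that idea (napi_schedule_threaded
is a made-up name, not an existing API):

	void napi_schedule_threaded(struct napi_struct *n)
	{
		/* napi_schedule_prep() atomically tests and sets NAPI_STATE_SCHED */
		if (napi_schedule_prep(n))
			queue_work(napi_workq, &n->work);
	}

That would leave the __napi_schedule() hot path untouched for everyone else.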

> +
>  	____napi_schedule(this_cpu_ptr(&softnet_data), n);
>  }
>  EXPORT_SYMBOL(__napi_schedule_irqoff);
> @@ -6601,6 +6612,30 @@ static void init_gro_hash(struct napi_struct *napi)
>  	napi->gro_bitmask = 0;
>  }
>  
> +static void napi_workfn(struct work_struct *work)
> +{
> +	struct napi_struct *n = container_of(work, struct napi_struct, work);
> +
> +	for (;;) {
> +		if (!test_bit(NAPI_STATE_SCHED, &n->state))

This all looks wrong; some important GRO logic is implemented in napi_poll().

You cannot bypass napi_poll().
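
For context, a heavily abridged paraphrase of what napi_poll() does beyond
calling the driver (not verbatim kernel code; locking, netpoll and the
disable path are omitted):

	static int napi_poll_abridged(struct napi_struct *n)
	{
		int work = 0, weight = n->weight;

		if (test_bit(NAPI_STATE_SCHED, &n->state))
			work = n->poll(n, weight);

		if (work < weight)
			return work; /* driver completed; GRO flushed in napi_complete_done() */

		/* budget exhausted: flush packets the driver parked in the GRO hash */
		if (n->gro_bitmask)
			napi_gro_flush(n, HZ >= 1000);
		gro_normal_list(n);
		return work;
	}

Calling n->poll() directly, as napi_workfn() does, skips these flushes.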

> +			return;
> +
> +		if (n->poll(n, n->weight) < n->weight)
> +			return;
> +
> +		if (!need_resched())
> +			continue;
> +


Why not simply use cond_resched()?
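
i.e. something like this in the work function (sketch):

	for (;;) {
		if (!test_bit(NAPI_STATE_SCHED, &n->state))
			return;
		if (n->poll(n, n->weight) < n->weight)
			return;
		cond_resched(); /* yield when needed instead of requeueing */
	}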

> +		/*
> +		 * have to pay for the latency of task switch even if
> +		 * napi is scheduled
> +		 */
> +		if (test_bit(NAPI_STATE_SCHED, &n->state))
> +			queue_work(napi_workq, work);
> +		return;
> +	}
> +}
> +
>  void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
>  		    int (*poll)(struct napi_struct *, int), int weight)
>  {
> @@ -6621,6 +6656,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
>  #ifdef CONFIG_NETPOLL
>  	napi->poll_owner = -1;
>  #endif
> +	INIT_WORK(&napi->work, napi_workfn);
>  	set_bit(NAPI_STATE_SCHED, &napi->state);
>  	napi_hash_add(napi);
>  }
> @@ -10676,6 +10712,10 @@ static int __init net_dev_init(void)
>  		sd->backlog.weight = weight_p;
>  	}
>  
> +	napi_workq = alloc_workqueue("napi_workq", WQ_UNBOUND | WQ_HIGHPRI,
> +				     WQ_UNBOUND_MAX_ACTIVE);
> +	BUG_ON(!napi_workq);
> +
>  	dev_boot_phase = 0;
>  
>  	/* The loopback device is special if any other network devices
> 


* Re: [RFC] net: add support for threaded NAPI polling
From: Felix Fietkau @ 2020-07-26 17:19 UTC
  To: Eric Dumazet, netdev; +Cc: Hillf Danton

On 2020-07-26 18:49, Eric Dumazet wrote:
> On 7/26/20 9:31 AM, Felix Fietkau wrote:
>> For some drivers (especially 802.11 drivers), doing a lot of work in the NAPI
>> poll function does not perform well. Since NAPI poll is bound to the CPU it
>> was scheduled from, we can easily end up with a few very busy CPUs spending
>> most of their time in softirq/ksoftirqd and some idle ones.
>> 
>> Introduce threaded NAPI for such drivers based on a workqueue. The API is the
>> same except for using netif_threaded_napi_add instead of netif_napi_add.
>> 
>> In my tests with mt76 on MT7621 using threaded NAPI + a thread for tx scheduling
>> improves LAN->WLAN bridging throughput by 10-50%. Throughput without threaded
>> NAPI is wildly inconsistent, depending on the CPU that runs the tx scheduling
>> thread.
>> 
>> With threaded NAPI, throughput seems stable and consistent (and higher than
>> the best results I got without it).
> 
> Note that even with a threaded NAPI, you will not be able to use more than one cpu
> to process the traffic.
For a single threaded NAPI user that's correct. The main difference here
is that the CPU running the poll function does not have to be the same
as the CPU that scheduled it, and it can change based on the load.
That makes a huge difference in my tests.

> Also, I wonder how this will scale to more than one device using it?
The workqueue creates multiple workers that pick up poll work, so it
should scale nicely.

> Say we need 4 NAPIs: how will the different work queues mix together?
> 
> Years ago we invented RPS and RFS to spread incoming traffic across more
> CPUs for devices that have a single hardware queue.
Unfortunately that does not work well at all for my use case (802.11
drivers). A really large chunk of the work (e.g. 802.11 -> 802.3 header
conversion, state checks, etc.) is being done inside the poll function,
before it even goes anywhere near the network stack and RPS/RFS.

I did a lot of experiments trying to parallelize the work by tuning RFS,
IRQ affinity, etc. on MT7621. I didn't get anything close to the
consistent performance I get by adding threaded NAPI to mt76 along with
moving some other CPU intensive work from tasklets to threads.

- Felix


* Re: [RFC] net: add support for threaded NAPI polling
From: Eric Dumazet @ 2020-07-26 17:58 UTC
  To: Felix Fietkau, Eric Dumazet, netdev; +Cc: Hillf Danton



On 7/26/20 10:19 AM, Felix Fietkau wrote:
> On 2020-07-26 18:49, Eric Dumazet wrote:
>> On 7/26/20 9:31 AM, Felix Fietkau wrote:
>>> For some drivers (especially 802.11 drivers), doing a lot of work in the NAPI
>>> poll function does not perform well. Since NAPI poll is bound to the CPU it
>>> was scheduled from, we can easily end up with a few very busy CPUs spending
>>> most of their time in softirq/ksoftirqd and some idle ones.
>>>
>>> Introduce threaded NAPI for such drivers based on a workqueue. The API is the
>>> same except for using netif_threaded_napi_add instead of netif_napi_add.
>>>
>>> In my tests with mt76 on MT7621 using threaded NAPI + a thread for tx scheduling
>>> improves LAN->WLAN bridging throughput by 10-50%. Throughput without threaded
>>> NAPI is wildly inconsistent, depending on the CPU that runs the tx scheduling
>>> thread.
>>>
>>> With threaded NAPI, throughput seems stable and consistent (and higher than
>>> the best results I got without it).
>>
>> Note that even with a threaded NAPI, you will not be able to use more than one cpu
>> to process the traffic.
> For a single threaded NAPI user that's correct. The main difference here
> is that the CPU running the poll function does not have to be the same
> as the CPU that scheduled it, and it can change based on the load.
> That makes a huge difference in my tests.

This really looks like there is a problem in the driver itself.

Have you first checked that this patch was not hurting your use case?

commit 4cd13c21b207e80ddb1144c576500098f2d5f882    softirq: Let ksoftirqd do its job

If so, your proposal could again hurt user space threads competing with a
high priority workqueue, and packets would not be consumed by user
applications. Having CPUs burn 100% of their cycles in kernel space is
useless.


It seems you need two CPUs per queue; I guess this might be because you
use a single NAPI for both tx and rx?

Have you simply tried to use two NAPIs, as some Ethernet drivers do?

Do not get me wrong, but scheduling a thread only to process one packet at a time
will hurt common cases.

Really I do not mind if you add a threaded NAPI, but it seems you missed
a lot of NAPI requirements in the proposed patch.

For instance, many ->poll() handlers assume BHs are disabled.

Also, part of the RPS logic depends on net_rx_action() calling
net_rps_action_and_irq_enable().
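
A minimal way to address the BH assumption in the work function might look
like this (a sketch, not part of the posted patch):

	static void napi_workfn(struct work_struct *work)
	{
		struct napi_struct *n = container_of(work, struct napi_struct, work);
		int done;

		local_bh_disable();	/* many ->poll() handlers expect BHs off */
		done = n->poll(n, n->weight);
		local_bh_enable();	/* also runs any softirqs raised by ->poll() */

		if (done >= n->weight && test_bit(NAPI_STATE_SCHED, &n->state))
			queue_work(napi_workq, work);
	}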




* Re: [RFC] net: add support for threaded NAPI polling
From: Felix Fietkau @ 2020-07-26 18:24 UTC
  To: Eric Dumazet, Eric Dumazet, netdev; +Cc: Hillf Danton

On 2020-07-26 19:58, Eric Dumazet wrote:
> 
> 
> On 7/26/20 10:19 AM, Felix Fietkau wrote:
>> On 2020-07-26 18:49, Eric Dumazet wrote:
>>> On 7/26/20 9:31 AM, Felix Fietkau wrote:
>>>> For some drivers (especially 802.11 drivers), doing a lot of work in the NAPI
>>>> poll function does not perform well. Since NAPI poll is bound to the CPU it
>>>> was scheduled from, we can easily end up with a few very busy CPUs spending
>>>> most of their time in softirq/ksoftirqd and some idle ones.
>>>>
>>>> Introduce threaded NAPI for such drivers based on a workqueue. The API is the
>>>> same except for using netif_threaded_napi_add instead of netif_napi_add.
>>>>
>>>> In my tests with mt76 on MT7621 using threaded NAPI + a thread for tx scheduling
>>>> improves LAN->WLAN bridging throughput by 10-50%. Throughput without threaded
>>>> NAPI is wildly inconsistent, depending on the CPU that runs the tx scheduling
>>>> thread.
>>>>
>>>> With threaded NAPI, throughput seems stable and consistent (and higher than
>>>> the best results I got without it).
>>>
>>> Note that even with a threaded NAPI, you will not be able to use more than one cpu
>>> to process the traffic.
>> For a single threaded NAPI user that's correct. The main difference here
>> is that the CPU running the poll function does not have to be the same
>> as the CPU that scheduled it, and it can change based on the load.
>> That makes a huge difference in my tests.
> 
> This really looks like there is a problem in the driver itself.
> 
> Have you first checked that this patch was not hurting your use case?
> 
> commit 4cd13c21b207e80ddb1144c576500098f2d5f882    softirq: Let ksoftirqd do its job
> 
> If so, your proposal could again hurt user space threads competing with a
> high priority workqueue, and packets would not be consumed by user
> applications. Having CPUs burn 100% of their cycles in kernel space is
> useless.
I already checked that a while back, and this patch is not hurting my
use case. I think it is actually helping, since I'm putting on enough
load to keep most softirq activity in ksoftirqd.

One thing to consider about my use case is that I'm bridging traffic
between an Ethernet interface and an 802.11 interface. Those packets do
not go through user space. If I push enough traffic, the ksoftirqd
instance running NAPI of the 802.11 device keeps the CPU 100% busy. I do
not have any significant user space activity during my tests.

Since tx and rx NAPI are scheduled from the same IRQ, which only fires on
one CPU, they end up in the same ksoftirqd instance.

Considering that one CPU is not enough to handle the entire NAPI workload
for the device, threaded NAPI helps by letting other (otherwise mostly
idle) CPUs pick up some of the workload.

> It seems you need two CPUs per queue; I guess this might be because you
> use a single NAPI for both tx and rx?
> 
> Have you simply tried to use two NAPIs, as some Ethernet drivers do?
I'm already doing that.

> Do not get me wrong, but scheduling a thread only to process one packet at a time
> will hurt common cases.
> 
> Really I do not mind if you add a threaded NAPI, but it seems you missed
> a lot of NAPI requirements in the proposed patch.
> 
> For instance, many ->poll() handlers assume BHs are disabled.
> 
> Also, part of the RPS logic depends on net_rx_action() calling
> net_rps_action_and_irq_enable().
I will look into that, thanks.

- Felix

