* [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
@ 2009-11-19 23:46 Eric Dumazet
  2009-11-20  2:12 ` Changli Gao
                   ` (5 more replies)
  0 siblings, 6 replies; 28+ messages in thread
From: Eric Dumazet @ 2009-11-19 23:46 UTC (permalink / raw)
  To: David S. Miller; +Cc: Tom Herbert, Linux Netdev List

Here is the first version of XPS.

The goal of XPS is to have TX-completed skbs freed by the cpu that submitted the transmit.

Because I chose to union skb->iif with skb->sending_cpu, I introduced
a new xps_consume_skb(skb) rather than generalizing consume_skb() itself.

This means that selected drivers must use the new function to benefit from XPS.

Preliminary tests are quite good, especially on NUMA machines.

Only NAPI drivers can use this new infrastructure (xps_consume_skb() cannot
be called from hardirq context, only from softirq)

I converted tg3 and pktgen for my tests

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
---
 drivers/net/tg3.c      |    2
 include/linux/skbuff.h |   14 +++
 net/core/Makefile      |    1
 net/core/dev.c         |    8 +-
 net/core/pktgen.c      |    1
 net/core/xps.c         |  145 +++++++++++++++++++++++++++++++++++++++
 6 files changed, 165 insertions(+), 6 deletions(-)

diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6e6db95..bc756e6 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4379,7 +4379,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
 			sw_idx = NEXT_TX(sw_idx);
 		}
 
-		dev_kfree_skb(skb);
+		xps_consume_skb(skb);
 
 		if (unlikely(tx_bug)) {
 			tg3_tx_recover(tp);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 63f4742..e8e4795 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -366,7 +366,10 @@ struct sk_buff {
 	struct nf_bridge_info	*nf_bridge;
 #endif
 
-	int			iif;
+	union {
+		int		iif;
+		int		sending_cpu;
+	};
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
 #ifdef CONFIG_NET_CLS_ACT
@@ -441,6 +444,15 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 
 extern void kfree_skb(struct sk_buff *skb);
 extern void consume_skb(struct sk_buff *skb);
+#if defined(CONFIG_SMP)
+extern void xps_consume_skb(struct sk_buff *skb);
+extern void xps_flush(void);
+extern void xps_init(void);
+#else
+#define xps_consume_skb(skb) consume_skb(skb)
+static inline void xps_flush(void) {}
+static inline void xps_init(void) {}
+#endif
 extern void	       __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
 				   gfp_t priority, int fclone, int node);
diff --git a/net/core/Makefile b/net/core/Makefile
index 796f46e..eacd3d8 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -19,4 +19,5 @@ obj-$(CONFIG_NET_DMA) += user_dma.o
 obj-$(CONFIG_FIB_RULES) += fib_rules.o
 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
+obj-$(CONFIG_SMP) += xps.o
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 9977288..9e134f6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1965,6 +1965,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 	struct netdev_queue *txq;
 	struct Qdisc *q;
 	int rc = -ENOMEM;
+	int cpu;
 
 	/* GSO will handle the following emulations directly. */
 	if (netif_needs_gso(dev, skb))
@@ -2000,6 +2001,7 @@ gso:
 	 */
 	rcu_read_lock_bh();
 
+	skb->sending_cpu = cpu = smp_processor_id();
 	txq = dev_pick_tx(dev, skb);
 	q = rcu_dereference(txq->qdisc);
 
@@ -2024,8 +2026,6 @@ gso:
 	   Either shot noqueue qdisc, it is even simpler 8)
 	 */
 	if (dev->flags & IFF_UP) {
-		int cpu = smp_processor_id(); /* ok because BHs are off */
-
 		if (txq->xmit_lock_owner != cpu) {
 
 			HARD_TX_LOCK(dev, txq, cpu);
@@ -2967,7 +2967,7 @@ static void net_rx_action(struct softirq_action *h)
 	}
 out:
 	local_irq_enable();
-
+	xps_flush();
 #ifdef CONFIG_NET_DMA
 	/*
 	 * There may not be any more sk_buffs coming right now, so push
@@ -5798,7 +5798,7 @@ static int __init net_dev_init(void)
 		queue->backlog.gro_list = NULL;
 		queue->backlog.gro_count = 0;
 	}
-
+	xps_init();
 	dev_boot_phase = 0;
 
 	/* The loopback device is special if any other network devices
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index d38470a..b41b794 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3435,6 +3435,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 			pkt_dev->clone_count--;	/* back out increment, OOM */
 			return;
 		}
+		pkt_dev->skb->sending_cpu = smp_processor_id();
 		pkt_dev->last_pkt_size = pkt_dev->skb->len;
 		pkt_dev->allocated_skbs++;
 		pkt_dev->clone_count = 0;	/* reset counter */
diff --git a/net/core/xps.c b/net/core/xps.c
index e69de29..e580159 100644
--- a/net/core/xps.c
+++ b/net/core/xps.c
@@ -0,0 +1,145 @@
+/*
+ * XPS : Xmit Packet Steering
+ *
+ * TX completion packet freeing is performed on cpu that sent packet.
+ */
+#if defined(CONFIG_SMP)
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+
+struct xps_pcpu_queue {
+	struct call_single_data csd;
+	struct sk_buff_head	list;
+};
+
+static DEFINE_PER_CPU(cpumask_t, xps_cpus);
+
+static DEFINE_PER_CPU(struct xps_pcpu_queue, xps_pcpu_queue);
+
+static struct sk_buff_head *xps_array; /* nr_cpu_ids elems */
+
+
+
+/* called from softirq context only */
+void xps_consume_skb(struct sk_buff *skb)
+{
+	unsigned int remote;
+	int thiscpu;
+	struct sk_buff_head *head;
+	/*
+	 * Might be stupid to dirty this cache line, but might
+	 * also be stupid to send an IPI if skb is not to be freed :(
+	 * One solution to this problem would be to move ->users in first cache line
+	 * (shared with ->next & ->sending_cpu fields ), so that this cpu dirties
+	 * only one cache line per queued skb.
+	 */
+	if (!atomic_dec_and_test(&skb->users))
+		return;
+
+	remote = skb->sending_cpu;
+	thiscpu = smp_processor_id();
+	if (remote >= nr_cpu_ids || remote == thiscpu) {
+		__kfree_skb(skb);
+		return;
+	}
+	head = &per_cpu_ptr(xps_array, thiscpu)[remote];
+
+	__skb_queue_head(head, skb);
+
+	/* IPI to remote processor will be sent later by xps_flush(),
+	 * to coalesce as many skbs as possible
+	 */
+	cpu_set(remote, __get_cpu_var(xps_cpus));
+}
+EXPORT_SYMBOL(xps_consume_skb);
+
+/*
+ * called at end of net_rx_action()
+ * preemption (and cpu migration/offline/online) disabled
+ */
+void xps_flush(void)
+{
+	int cpu, prevlen;
+	struct sk_buff_head *head = per_cpu_ptr(xps_array, smp_processor_id());
+	struct xps_pcpu_queue *q;
+	struct sk_buff *skb;
+
+	for_each_cpu_mask_nr(cpu, __get_cpu_var(xps_cpus)) {
+		q = &per_cpu(xps_pcpu_queue, cpu);
+		if (cpu_online(cpu)) {
+			spin_lock(&q->list.lock);
+			prevlen = skb_queue_len(&q->list);
+			skb_queue_splice_init(&head[cpu], &q->list);
+			spin_unlock(&q->list.lock);
+			/*
+			 * We hope remote cpu will be fast enough to transfer
+			 * this list to its completion queue before our
+			 * next xps_flush() call
+			 */
+			if (!prevlen)
+				__smp_call_function_single(cpu, &q->csd, 0);
+			continue;
+		}
+		/*
+		 * ok, we must free these skbs, even if we tried to avoid it :)
+		 */
+		while ((skb = __skb_dequeue(&head[cpu])) != NULL)
+			__kfree_skb(skb);
+	}
+	cpus_clear(__get_cpu_var(xps_cpus));
+}
+
+/*
+ * called from hardirq (IPI) context
+ */
+static void remote_free_skb_list(void *arg)
+{
+	struct sk_buff *last;
+	struct softnet_data *sd;
+	struct xps_pcpu_queue *q = arg; /* &__get_cpu_var(xps_pcpu_queue); */
+
+	spin_lock(&q->list.lock);
+
+	last = q->list.prev;
+	sd = &__get_cpu_var(softnet_data);
+	last->next = sd->completion_queue;
+	sd->completion_queue = q->list.next;
+	__skb_queue_head_init(&q->list);
+
+	spin_unlock(&q->list.lock);
+
+	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+}
+
+void __init xps_init(void)
+{
+	int cpu, remote;
+	struct sk_buff_head *head;
+
+	xps_array = __alloc_percpu(nr_cpu_ids * sizeof(struct sk_buff_head),
+				   __alignof__(struct sk_buff_head));
+	if (!xps_array)
+		panic("XPS: Could not allocate xps_array\n");
+
+	for_each_possible_cpu(cpu) {
+		skb_queue_head_init(&per_cpu(xps_pcpu_queue, cpu).list);
+		per_cpu(xps_pcpu_queue, cpu).csd.func = remote_free_skb_list;
+		per_cpu(xps_pcpu_queue, cpu).csd.info = &per_cpu(xps_pcpu_queue, cpu);
+		head = per_cpu_ptr(xps_array, cpu);
+		for (remote = 0; remote < nr_cpu_ids; remote++)
+			__skb_queue_head_init(head + remote);
+	}
+}
+
+#endif /* CONFIG_SMP */


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-19 23:46 [PATCH net-next-2.6] net: Xmit Packet Steering (XPS) Eric Dumazet
@ 2009-11-20  2:12 ` Changli Gao
  2009-11-20  4:58   ` Eric Dumazet
       [not found] ` <65634d660911191641o4210a797mf1e8168dd8dd8b60@mail.gmail.com>
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 28+ messages in thread
From: Changli Gao @ 2009-11-20  2:12 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

On Fri, Nov 20, 2009 at 7:46 AM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
>
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 9977288..9e134f6 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -2000,6 +2001,7 @@ gso:
>         */
>        rcu_read_lock_bh();
>
> +       skb->sending_cpu = cpu = smp_processor_id();
>        txq = dev_pick_tx(dev, skb);
>        q = rcu_dereference(txq->qdisc);

I think assigning cpu to skb->sending_cpu just before calling
hard_start_xmit is better, because the CPU which dequeues the skb will
be another one.

>
> @@ -2024,8 +2026,6 @@ gso:
>           Either shot noqueue qdisc, it is even simpler 8)
>         */
>        if (dev->flags & IFF_UP) {
> -               int cpu = smp_processor_id(); /* ok because BHs are off */
> -
>                if (txq->xmit_lock_owner != cpu) {
>
>                        HARD_TX_LOCK(dev, txq, cpu);
> @@ -2967,7 +2967,7 @@ static void net_rx_action(struct softirq_action *h)
>        }
>  out:
>        local_irq_enable();
> -
> +       xps_flush();

If there aren't any new skbs, the memory will be held forever. I know
you want to eliminate unnecessary IPIs; how about sending an IPI only when
the remote xps_pcpu_queues change from empty to nonempty?


-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20  2:12 ` Changli Gao
@ 2009-11-20  4:58   ` Eric Dumazet
  2009-11-20  5:11     ` Changli Gao
  0 siblings, 1 reply; 28+ messages in thread
From: Eric Dumazet @ 2009-11-20  4:58 UTC (permalink / raw)
  To: Changli Gao; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

Changli Gao wrote:
> On Fri, Nov 20, 2009 at 7:46 AM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
>> diff --git a/net/core/dev.c b/net/core/dev.c
>> index 9977288..9e134f6 100644
>> --- a/net/core/dev.c
>> +++ b/net/core/dev.c
>> @@ -2000,6 +2001,7 @@ gso:
>>         */
>>        rcu_read_lock_bh();
>>
>> +       skb->sending_cpu = cpu = smp_processor_id();
>>        txq = dev_pick_tx(dev, skb);
>>        q = rcu_dereference(txq->qdisc);
> 
> I think assigning cpu to skb->sending_cpu just before calling
> hard_start_xmit is better, because the CPU which dequeues the skb will
> be another one.

I want to record the application CPU, because I want the application CPU
to call sock_wfree(), not the CPU that happened to dequeue skb to transmit it
in case of txq contention.

> 
>> @@ -2024,8 +2026,6 @@ gso:
>>           Either shot noqueue qdisc, it is even simpler 8)
>>         */
>>        if (dev->flags & IFF_UP) {
>> -               int cpu = smp_processor_id(); /* ok because BHs are off */
>> -
>>                if (txq->xmit_lock_owner != cpu) {
>>
>>                        HARD_TX_LOCK(dev, txq, cpu);
>> @@ -2967,7 +2967,7 @@ static void net_rx_action(struct softirq_action *h)
>>        }
>>  out:
>>        local_irq_enable();
>> -
>> +       xps_flush();
> 
> If there isn't any new skbs, the memory will be hold forever. I know
> you want to eliminate unnecessary IPI, how about sending IPI only when
> the remote xps_pcpu_queues are changed from empty to nonempty?

I don't understand your remark, and don't see the problem, yet.

I send an IPI only to cpus for which I know I have at least one skb queued.
For each cpu taking TX completion interrupts I have:

One bitmask (xps_cpus) of cpus I will eventually send an IPI to at the end of net_rx_action()

One array of skb lists per remote cpu, allocated on cpu node memory, thanks
to __alloc_percpu() at boot time.

I say _eventually_ because the algo is :

+		if (cpu_online(cpu)) {
+			spin_lock(&q->list.lock);
+			prevlen = skb_queue_len(&q->list);
+			skb_queue_splice_init(&head[cpu], &q->list);
+			spin_unlock(&q->list.lock);
+			/*
+			 * We hope remote cpu will be fast enough to transfer
+			 * this list to its completion queue before our
+			 * next xps_flush() call
+			 */
+			if (!prevlen)
+				__smp_call_function_single(cpu, &q->csd, 0);
+			continue;

So I send an IPI only if needed, once for the whole skb list.

With my pktgen (no skb cloning setup) tests, and

ethtool -C eth3 tx-usecs 1000 tx-frames 100

I really saw batches of 100 frames given from CPU X (NIC interrupts) to CPU Y (pktgen cpu)

What memory is held forever?


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
       [not found] ` <65634d660911191641o4210a797mf1e8168dd8dd8b60@mail.gmail.com>
@ 2009-11-20  5:08   ` Eric Dumazet
  0 siblings, 0 replies; 28+ messages in thread
From: Eric Dumazet @ 2009-11-20  5:08 UTC (permalink / raw)
  To: Tom Herbert; +Cc: David S. Miller, Linux Netdev List

Tom Herbert wrote:
> 
> 
> On Thu, Nov 19, 2009 at 3:46 PM, Eric Dumazet <eric.dumazet@gmail.com
> <mailto:eric.dumazet@gmail.com>> wrote:
> 
>     Here is first version of XPS.
> 
> Very cool!  The infrastructure to move lists of skb's from cpu to
> another and the IPI to kick processing look like something that could be
> consolidated between rps and xps :-)

Sure, but RPS seems quite slow to integrate (no offense Tom) :)
I wanted to cook a patch on top of yours, but got no sign you were
about to release RPS version 4 soon.

> 
>     Goal of XPS is to free TX completed skbs by the cpu that submitted
>     the transmit.
> 
>     Because I chose to union skb->iif with skb->sending_cpu, I chose
>     to introduce a new xps_consume_skb(skb), and not generalize
>     consume_skb() itself.
> 
>     This means that selected drivers must use new function to benefit
>     from XPS
> 
> 
> Is this better than modifying consume_skb so this can be used by any driver?

consume_skb() is also used by the RX side, and this side doesn't want XPS.

Adding a flag in the skb to differentiate the two uses might be possible,
but that would add a new test in hot paths...
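
For illustration only, here is roughly what that rejected flag approach would mean
(xps_steer and xps_queue_to_sender() are hypothetical names, not part of this patch):

void consume_skb(struct sk_buff *skb)
{
	if (!atomic_dec_and_test(&skb->users))
		return;
	/* this extra branch is the cost every consume_skb() caller would pay */
	if (skb->xps_steer)			/* hypothetical new skb bit, set at xmit time */
		xps_queue_to_sender(skb);	/* hypothetical helper: queue skb for its sending cpu */
	else
		__kfree_skb(skb);
}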

>  
> 
>      
> 
>     Preliminary tests are quite good, especially on NUMA machines.
> 
>     Only NAPI drivers can use this new infrastructure (xps_consume_skb()
>     cannot
>     be called from hardirq context, only from softirq)
> 
> Is this a strict requirement, especially considering devices with
> separate TX interrupts?  For a hardirq we could we just put the skb on
> local percpu queue and schedule a softirq to do the ipi.

I chose this way because any sane and up-to-date driver should really
not use hardirq TX completion. I want fast processing, without
masking local interrupts in xps_consume_skb(), and without testing
our context.

Note : if TX completion and RX are run in different NAPI contexts,
there is no problem using xps_consume_skb().
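
As an aside, a minimal sketch of the hardirq-capable variant Tom suggests could look
like this (illustrative only; xps_consume_skb_irq() is a hypothetical name, and the
flush side would then also have to run from NET_TX softirq with irq-safe list access):

void xps_consume_skb_irq(struct sk_buff *skb)
{
	unsigned long flags;
	unsigned int remote = skb->sending_cpu;

	if (remote >= nr_cpu_ids || remote == smp_processor_id()) {
		dev_kfree_skb_irq(skb);		/* usual hardirq deferred free */
		return;
	}
	if (!atomic_dec_and_test(&skb->users))
		return;
	local_irq_save(flags);
	__skb_queue_head(&per_cpu_ptr(xps_array, smp_processor_id())[remote], skb);
	cpu_set(remote, __get_cpu_var(xps_cpus));
	raise_softirq_irqoff(NET_TX_SOFTIRQ);	/* defer the IPI batching to softirq */
	local_irq_restore(flags);
}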


Thanks for reviewing Tom.


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20  4:58   ` Eric Dumazet
@ 2009-11-20  5:11     ` Changli Gao
  2009-11-20  5:24       ` Eric Dumazet
  0 siblings, 1 reply; 28+ messages in thread
From: Changli Gao @ 2009-11-20  5:11 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

On Fri, Nov 20, 2009 at 12:58 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> Changli Gao wrote:
>> On Fri, Nov 20, 2009 at 7:46 AM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
>>> diff --git a/net/core/dev.c b/net/core/dev.c
>>> index 9977288..9e134f6 100644
>>> --- a/net/core/dev.c
>>> +++ b/net/core/dev.c
>>> @@ -2000,6 +2001,7 @@ gso:
>>>         */
>>>        rcu_read_lock_bh();
>>>
>>> +       skb->sending_cpu = cpu = smp_processor_id();
>>>        txq = dev_pick_tx(dev, skb);
>>>        q = rcu_dereference(txq->qdisc);
>>
>> I think assigning cpu to skb->sending_cpu just before calling
>> hard_start_xmit is better, because the CPU which dequeues the skb will
>> be another one.
>
> I want to record the application CPU, because I want the application CPU
> to call sock_wfree(), not the CPU that happened to dequeue skb to transmit it
> in case of txq contention.
>

got it.

>>
>>> @@ -2024,8 +2026,6 @@ gso:
>>>           Either shot noqueue qdisc, it is even simpler 8)
>>>         */
>>>        if (dev->flags & IFF_UP) {
>>> -               int cpu = smp_processor_id(); /* ok because BHs are off */
>>> -
>>>                if (txq->xmit_lock_owner != cpu) {
>>>
>>>                        HARD_TX_LOCK(dev, txq, cpu);
>>> @@ -2967,7 +2967,7 @@ static void net_rx_action(struct softirq_action *h)
>>>        }
>>>  out:
>>>        local_irq_enable();
>>> -
>>> +       xps_flush();
>>
>> If there isn't any new skbs, the memory will be hold forever. I know
>> you want to eliminate unnecessary IPI, how about sending IPI only when
>> the remote xps_pcpu_queues are changed from empty to nonempty?
>
> I dont understand your remark, and dont see the problem, yet.
>
> I send IPI only on cpus I know I have at least one skb queueud for them.
> For each cpu taking TX completion interrupts I have :
>
> One bitmask (xps_cpus) of cpus I will eventually send IPI at end of net_rx_action()
>

You call xps_flush() in net_rx_action(). It means that if no new
packet arrives, xps_flush() will never be called, and the memory
used by skbs will be held forever. Did I misunderstand? Your algorithm
only works for packet forwarding, not for packets sent from local
sockets.

-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20  5:11     ` Changli Gao
@ 2009-11-20  5:24       ` Eric Dumazet
  2009-11-20  5:34         ` Changli Gao
  0 siblings, 1 reply; 28+ messages in thread
From: Eric Dumazet @ 2009-11-20  5:24 UTC (permalink / raw)
  To: Changli Gao; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

Changli Gao wrote:
> You call xps_flush() in net_rx_aciton(). It means that if no new
> packet arrives, xps_flush() won't be called forever, and the memory
> used by skbs will be hold forever. Did I misunderstand? Your algorithm
> only works with packet forwarding but sending packets from local
> sockets.
> 

Please re-read my patch; you misunderstood it, or I don't get you.

If xps_consume_skb(skb) is ever called (from one to XXX times),
then we xps_flush() them, from net_rx_action():

net_rx_action()
{
	while (has_work) {
		perform_napi_things(); // calls xps_consume_skb()
	}
	xps_flush(); // post things to remote cpus, and don't leak memory
}

This net_rx_action() is the same for forwarding and locally generated packets.


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20  5:24       ` Eric Dumazet
@ 2009-11-20  5:34         ` Changli Gao
  2009-11-20  5:42           ` Eric Dumazet
  0 siblings, 1 reply; 28+ messages in thread
From: Changli Gao @ 2009-11-20  5:34 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

On Fri, Nov 20, 2009 at 1:24 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> Changli Gao wrote:
>
> Please re-read my patch, you misunderstood it, or I dont get you.

I think I didn't misunderstand it. If a local socket only sends packets
which don't need replies from the receiver, then no new NIC RX IRQ, and
no NET_RX softirq, will be triggered. Who will call xps_flush() to free the
memory used by locally generated packets?

>
> If xps_consume_skb(skb) is ever called (from one to XXX times),
> then we xps_flush() them, from net_rx_action()
>
> net_rx_action()
> {
>        while (has_work) {
>                perform_napi_things(); // calls xps_consume_skb()
>        }
>        xps_flush(); // post things to remote cpus, and dont leak memory
> }
>
> This net_rx_action() is same for forwarding and localy generated packets.
>



-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20  5:34         ` Changli Gao
@ 2009-11-20  5:42           ` Eric Dumazet
  2009-11-20  5:50             ` Changli Gao
  0 siblings, 1 reply; 28+ messages in thread
From: Eric Dumazet @ 2009-11-20  5:42 UTC (permalink / raw)
  To: Changli Gao; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

Changli Gao wrote:
> On Fri, Nov 20, 2009 at 1:24 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
>> Changli Gao wrote:
>>
>> Please re-read my patch, you misunderstood it, or I dont get you.
> 
> I think I didn't misunderstand it. If local socket only sends packets,
> which don't need replies from receiver, so new NIC RX IRQ, and NET_RX
> softirq won't be triggered. Who will call xps_flush() to free the
> memory used by locally generated packets?
> 

Changli, when we transmit an skb on a NIC, the NIC is supposed to have a TX completion
callback to free this skb.

These completion callbacks run from net_rx_action(), if the driver is NAPI enabled.

Only NAPI-enabled drivers are allowed to use the XPS infrastructure.

I sent 100.000.000 packets in my pktgen+tg3 tests without receiving a single packet
in return, and I can tell you all packets were correctly freed :)




* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20  5:42           ` Eric Dumazet
@ 2009-11-20  5:50             ` Changli Gao
  0 siblings, 0 replies; 28+ messages in thread
From: Changli Gao @ 2009-11-20  5:50 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

On Fri, Nov 20, 2009 at 1:42 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> Changli Gao wrote:
>> On Fri, Nov 20, 2009 at 1:24 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
>>> Changli Gao wrote:
>>>
>>> Please re-read my patch, you misunderstood it, or I dont get you.
>>
>> I think I didn't misunderstand it. If local socket only sends packets,
>> which don't need replies from receiver, so new NIC RX IRQ, and NET_RX
>> softirq won't be triggered. Who will call xps_flush() to free the
>> memory used by locally generated packets?
>>
>
> Changli, when we transmit a skb on NIC, NIC is supposed to have a TX completion
> call back, to free this skb.
>
> These completion calls are running from net_rx_action(), if driver is NAPI enabled.
>
> Only NAPI enabled drivers are allowed to use XPS infrastructure.
>
> I sent 100.000.000 packets in my pktgen+tg3 tests, without receiving a single packet
> in return, I can tell you all packets were correctly freed :)
>
>

Oh, I am so sorry. It seems I missed something. I need to review the new
NAPI. Thanks for your patience and help.

-- 
Regards,
Changli Gao(xiaosuo@gmail.com)


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-19 23:46 [PATCH net-next-2.6] net: Xmit Packet Steering (XPS) Eric Dumazet
  2009-11-20  2:12 ` Changli Gao
       [not found] ` <65634d660911191641o4210a797mf1e8168dd8dd8b60@mail.gmail.com>
@ 2009-11-20 13:32 ` Jarek Poplawski
  2009-11-20 14:45   ` Eric Dumazet
  2009-11-20 20:51 ` Andi Kleen
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 28+ messages in thread
From: Jarek Poplawski @ 2009-11-20 13:32 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

On 20-11-2009 00:46, Eric Dumazet wrote:
> Here is first version of XPS.
> 
> Goal of XPS is to free TX completed skbs by the cpu that submitted the transmit.

But why?... OK, you write in another message about sock_wfree(). Then
what about users who don't use sock_wfree() (routers)? Will there be any way
to disable it?

> 
> Because I chose to union skb->iif with skb->sending_cpu, I chose
> to introduce a new xps_consume_skb(skb), and not generalize consume_skb() itself.
> 
> This means that selected drivers must use new function to benefit from XPS
> 
> Preliminary tests are quite good, especially on NUMA machines.
> 
> Only NAPI drivers can use this new infrastructure (xps_consume_skb() cannot
> be called from hardirq context, only from softirq)
> 
> I converted tg3 and pktgen for my tests
> 
> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
> ---
...
> diff --git a/net/core/xps.c b/net/core/xps.c
> index e69de29..e580159 100644
> --- a/net/core/xps.c
> +++ b/net/core/xps.c
> @@ -0,0 +1,145 @@
> +/*
> + * XPS : Xmit Packet Steering
> + *
> + * TX completion packet freeing is performed on cpu that sent packet.
> + */
> +#if defined(CONFIG_SMP)

Shouldn't it be in the Makefile?

...
> +/*
> + * called at end of net_rx_action()
> + * preemption (and cpu migration/offline/online) disabled
> + */
> +void xps_flush(void)
> +{
> +	int cpu, prevlen;
> +	struct sk_buff_head *head = per_cpu_ptr(xps_array, smp_processor_id());
> +	struct xps_pcpu_queue *q;
> +	struct sk_buff *skb;
> +
> +	for_each_cpu_mask_nr(cpu, __get_cpu_var(xps_cpus)) {
> +		q = &per_cpu(xps_pcpu_queue, cpu);
> +		if (cpu_online(cpu)) {
> +			spin_lock(&q->list.lock);

This lock probably needs irq disabling: let's say 2 cpus run this at
the same time and both are interrupted with these (previously
scheduled) IPIs?

> +			prevlen = skb_queue_len(&q->list);
> +			skb_queue_splice_init(&head[cpu], &q->list);
> +			spin_unlock(&q->list.lock);
> +			/*
> +			 * We hope remote cpu will be fast enough to transfer
> +			 * this list to its completion queue before our
> +			 * next xps_flush() call
> +			 */
> +			if (!prevlen)
> +				__smp_call_function_single(cpu, &q->csd, 0);
> +			continue;
> +		}
> +		/*
> +		 * ok, we must free these skbs, even if we tried to avoid it :)
> +		 */
> +		while ((skb = __skb_dequeue(&head[cpu])) != NULL)
> +			__kfree_skb(skb);
> +	}
> +	cpus_clear(__get_cpu_var(xps_cpus));
> +}
> +
> +/*
> + * called from hardirq (IPI) context
> + */
> +static void remote_free_skb_list(void *arg)
> +{
> +	struct sk_buff *last;
> +	struct softnet_data *sd;
> +	struct xps_pcpu_queue *q = arg; /* &__get_cpu_var(xps_pcpu_queue); */
> +
> +	spin_lock(&q->list.lock);
> +
> +	last = q->list.prev;

Is q->list handled in case this cpu goes down before this IPI is
triggered?

Jarek P.

> +	sd = &__get_cpu_var(softnet_data);
> +	last->next = sd->completion_queue;
> +	sd->completion_queue = q->list.next;
> +	__skb_queue_head_init(&q->list);
> +
> +	spin_unlock(&q->list.lock);
> +
> +	raise_softirq_irqoff(NET_TX_SOFTIRQ);
> +}
...


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 13:32 ` Jarek Poplawski
@ 2009-11-20 14:45   ` Eric Dumazet
  2009-11-20 20:04     ` Jarek Poplawski
  0 siblings, 1 reply; 28+ messages in thread
From: Eric Dumazet @ 2009-11-20 14:45 UTC (permalink / raw)
  To: Jarek Poplawski; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

Jarek Poplawski wrote:
> On 20-11-2009 00:46, Eric Dumazet wrote:
>> Here is first version of XPS.
>>
>> Goal of XPS is to free TX completed skbs by the cpu that submitted the transmit.
> 
> But why?... OK, you write in another message about sock_wfree(). Then
> how about users, who don't sock_wfree (routers)? Will there be any way
> to disable it?


This is open for discussion, but I saw no problem with routing workloads.

sock_wfree() is not that expensive for tcp anyway.
You also have the cost of kfree()ing two blocks of memory per skb, if the allocation was done by another cpu.

If this happens to be a problem, we can immediately free the packet if it
has no destructor:

At xmit time, initialize skb->sending_cpu like this:

skb->sending_cpu = (skb->destructor) ? smp_processor_id() : 0xFFFF;

to make sure we don't touch too many cache lines at tx completion time.
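
A tiny sketch of that convention (xps_mark_skb() is a hypothetical helper, not in the patch):

static inline void xps_mark_skb(struct sk_buff *skb)
{
	/* only skbs with a destructor are worth steering back to their sender */
	skb->sending_cpu = skb->destructor ? smp_processor_id() : 0xFFFF;
}

/* xps_consume_skb() needs no change for this: 0xFFFF is >= nr_cpu_ids on any
 * realistic box, so such skbs fall through to a plain local __kfree_skb().
 */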


>> +/*
>> + * XPS : Xmit Packet Steering
>> + *
>> + * TX completion packet freeing is performed on cpu that sent packet.
>> + */
>> +#if defined(CONFIG_SMP)
> 
> Shouldn't it be in the Makefile?

It is in the Makefile too; I left it in the prelim code to make it clear this is CONFIG_SMP-only.

> 
> ...
>> +/*
>> + * called at end of net_rx_action()
>> + * preemption (and cpu migration/offline/online) disabled
>> + */
>> +void xps_flush(void)
>> +{
>> +	int cpu, prevlen;
>> +	struct sk_buff_head *head = per_cpu_ptr(xps_array, smp_processor_id());
>> +	struct xps_pcpu_queue *q;
>> +	struct sk_buff *skb;
>> +
>> +	for_each_cpu_mask_nr(cpu, __get_cpu_var(xps_cpus)) {
>> +		q = &per_cpu(xps_pcpu_queue, cpu);
>> +		if (cpu_online(cpu)) {
>> +			spin_lock(&q->list.lock);
> 
> This lock probably needs irq disabling: let's say 2 cpus run this at
> the same time and both are interrupted with these (previously
> scheduled) IPIs?

Repeat after me :

lockdep is my friend, lockdep is my friend, lockdep is my friend... :)

Seriously, I must think again about this locking scheme.

>> +static void remote_free_skb_list(void *arg)
>> +{
>> +	struct sk_buff *last;
>> +	struct softnet_data *sd;
>> +	struct xps_pcpu_queue *q = arg; /* &__get_cpu_var(xps_pcpu_queue); */
>> +
>> +	spin_lock(&q->list.lock);
>> +
>> +	last = q->list.prev;
> 
> Is q->list handled in case this cpu goes down before this IPI is
> triggered?


[block migration] (how ? this is the question)

if (cpu_online(cpu)) { 
	give_work_to_cpu(cpu);
	trigger IPI
} else {
	handle_work_ourself()
}

[unblock migration]

The general problem is: what guards the cpu against going offline between the if (cpu_online(cpu))
and the IPI?
I don't know yet, but it seems that disabling preemption is enough to get this
guarantee. This seems strange.

We can add a notifier (or better, call a function from an existing one: dev_cpu_callback()) to
flush this queue when necessary.
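
For illustration only (none of this is in the patch; the function name is hypothetical),
such a flush from the hotplug path could look roughly like this, called from
dev_cpu_callback() on CPU_DEAD next to the existing completion_queue handling:

static void xps_cpu_dead(unsigned int oldcpu)
{
	struct xps_pcpu_queue *q = &per_cpu(xps_pcpu_queue, oldcpu);
	struct sk_buff *skb;

	/* the dead cpu can no longer run its IPI handler, so free its
	 * pending list right here
	 */
	spin_lock_irq(&q->list.lock);
	while ((skb = __skb_dequeue(&q->list)) != NULL)
		__kfree_skb(skb);
	spin_unlock_irq(&q->list.lock);
}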

Thanks



* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 14:45   ` Eric Dumazet
@ 2009-11-20 20:04     ` Jarek Poplawski
  2009-11-20 21:43       ` Eric Dumazet
  0 siblings, 1 reply; 28+ messages in thread
From: Jarek Poplawski @ 2009-11-20 20:04 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

On Fri, Nov 20, 2009 at 03:45:42PM +0100, Eric Dumazet wrote:
> Jarek Poplawski wrote:
> > On 20-11-2009 00:46, Eric Dumazet wrote:
> >> Here is first version of XPS.
> >>
> >> Goal of XPS is to free TX completed skbs by the cpu that submitted the transmit.
> > 
> > But why?... OK, you write in another message about sock_wfree(). Then
> > how about users, who don't sock_wfree (routers)? Will there be any way
> > to disable it?
> 
> 
> This is open for discussion, but I saw no problem with routing workloads.

IMHO, it should depend on testing: if you can prove there is a
distinct gain in the "common" use case (which probably isn't a NUMA box
used e.g. for google.com or even kernel.org yet), and no visible
slowdown for such a router, then we could probably forget about
disabling, and look more at optimizations of the fast path.

> 
> sock_wfree() is not that expensive for tcp anyway.
> You also have a cost of kfreeing() two blocks of memory per skb, if allocation was done by another cpu.
> 
> If this happens to be a problem, we can immediately free packet if it 
> has no destructors :
> 
> At xmit time, initialize skb->sending_cpu like that
> 
> skb->sending_cpu = (skb->destructor) ? smp_processor_id() : 0xFFFF;
> 
> to make sure we dont touch too many cache lines at tx completion time.
> 
> 
> >> +/*
> >> + * XPS : Xmit Packet Steering
> >> + *
> >> + * TX completion packet freeing is performed on cpu that sent packet.
> >> + */
> >> +#if defined(CONFIG_SMP)
> > 
> > Shouldn't it be in the Makefile?
> 
> It is in Makefile too, I let it in prelim code to make it clear this was CONFIG_SMP only.

Aha! Now it's clear why I made this mistake. ;-)
> 
> > 
> > ...
> >> +/*
> >> + * called at end of net_rx_action()
> >> + * preemption (and cpu migration/offline/online) disabled
> >> + */
> >> +void xps_flush(void)
> >> +{
> >> +	int cpu, prevlen;
> >> +	struct sk_buff_head *head = per_cpu_ptr(xps_array, smp_processor_id());
> >> +	struct xps_pcpu_queue *q;
> >> +	struct sk_buff *skb;
> >> +
> >> +	for_each_cpu_mask_nr(cpu, __get_cpu_var(xps_cpus)) {
> >> +		q = &per_cpu(xps_pcpu_queue, cpu);
> >> +		if (cpu_online(cpu)) {
> >> +			spin_lock(&q->list.lock);
> > 
> > This lock probably needs irq disabling: let's say 2 cpus run this at
> > the same time and both are interrupted with these (previously
> > scheduled) IPIs?
> 
> Repeat after me :
> 
> lockdep is my friend, lockdep is my friend, lockdep is my friend... :)

Hmm... Actually, why did I have to do lockdep's job...
> 
> Seriously, I must think again on this locking schem.
> 
> >> +static void remote_free_skb_list(void *arg)
> >> +{
> >> +	struct sk_buff *last;
> >> +	struct softnet_data *sd;
> >> +	struct xps_pcpu_queue *q = arg; /* &__get_cpu_var(xps_pcpu_queue); */
> >> +
> >> +	spin_lock(&q->list.lock);
> >> +
> >> +	last = q->list.prev;
> > 
> > Is q->list handled in case this cpu goes down before this IPI is
> > triggered?
> 
> 
> [block migration] (how ? this is the question)
> 
> if (cpu_online(cpu)) { 
> 	give_work_to_cpu(cpu);
> 	trigger IPI
> } else {
> 	handle_work_ourself()
> }
> 
> [unblock migration]
> 
> General problem is : what guards cpu going off line between the if (cpu_online(cpu))
> and the IPI.
> I dont know yet, but it seems that disabling preemption is enough to get this
> guarantee. This seems strange.
> 
> We can add a notifier (or better call a function from existing one : dev_cpu_callback()) to 
> flush this queue when necessary.

Using dev_cpu_callback() looks quite obvious to me.

Jarek P.


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-19 23:46 [PATCH net-next-2.6] net: Xmit Packet Steering (XPS) Eric Dumazet
                   ` (2 preceding siblings ...)
  2009-11-20 13:32 ` Jarek Poplawski
@ 2009-11-20 20:51 ` Andi Kleen
  2009-11-20 20:53   ` David Miller
  2009-11-20 22:30   ` Eric Dumazet
  2009-11-20 20:53 ` Jarek Poplawski
  2009-11-20 22:32 ` David Miller
  5 siblings, 2 replies; 28+ messages in thread
From: Andi Kleen @ 2009-11-20 20:51 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

Eric Dumazet <eric.dumazet@gmail.com> writes:

> Here is first version of XPS.
>
> Goal of XPS is to free TX completed skbs by the cpu that submitted the transmit.
>
> Because I chose to union skb->iif with skb->sending_cpu, I chose
> to introduce a new xps_consume_skb(skb), and not generalize consume_skb() itself.
>
> This means that selected drivers must use new function to benefit from XPS
>
> Preliminary tests are quite good, especially on NUMA machines.
>
> Only NAPI drivers can use this new infrastructure (xps_consume_skb() cannot
> be called from hardirq context, only from softirq)
>
> I converted tg3 and pktgen for my tests

Do you have numbers on this? It seems like a lot of effort to avoid transferring
a few cache lines.

-Andi (who is a bit sceptical and would rather see generic work for this in slab)

-- 
ak@linux.intel.com -- Speaking for myself only.


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 20:51 ` Andi Kleen
@ 2009-11-20 20:53   ` David Miller
  2009-11-20 22:30   ` Eric Dumazet
  1 sibling, 0 replies; 28+ messages in thread
From: David Miller @ 2009-11-20 20:53 UTC (permalink / raw)
  To: andi; +Cc: eric.dumazet, therbert, netdev

From: Andi Kleen <andi@firstfloor.org>
Date: Fri, 20 Nov 2009 21:51:02 +0100

> -Andi (who is a bit sceptical and would rather see generic work for
> -this in slab)

SLAB does this to an extent, but it's the other things about
SKB freeing that are expensive and hit lots of cache misses
in these cases.


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-19 23:46 [PATCH net-next-2.6] net: Xmit Packet Steering (XPS) Eric Dumazet
                   ` (3 preceding siblings ...)
  2009-11-20 20:51 ` Andi Kleen
@ 2009-11-20 20:53 ` Jarek Poplawski
  2009-11-20 21:35   ` Eric Dumazet
  2009-11-20 22:32 ` David Miller
  5 siblings, 1 reply; 28+ messages in thread
From: Jarek Poplawski @ 2009-11-20 20:53 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

Eric Dumazet wrote, On 11/20/2009 12:46 AM:

> Here is first version of XPS.
> 
> Goal of XPS is to free TX completed skbs by the cpu that submitted the transmit.
> 
> Because I chose to union skb->iif with skb->sending_cpu, I chose
> to introduce a new xps_consume_skb(skb), and not generalize consume_skb() itself.

...

> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> index 63f4742..e8e4795 100644
> --- a/include/linux/skbuff.h
> +++ b/include/linux/skbuff.h
> @@ -366,7 +366,10 @@ struct sk_buff {
>  	struct nf_bridge_info	*nf_bridge;
>  #endif
>  
> -	int			iif;
> +	union {
> +		int		iif;
> +		int		sending_cpu;
> +	};

...

> diff --git a/net/core/dev.c b/net/core/dev.c
> index 9977288..9e134f6 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -1965,6 +1965,7 @@ int dev_queue_xmit(struct sk_buff *skb)
>  	struct netdev_queue *txq;
>  	struct Qdisc *q;
>  	int rc = -ENOMEM;
> +	int cpu;
>  
>  	/* GSO will handle the following emulations directly. */
>  	if (netif_needs_gso(dev, skb))
> @@ -2000,6 +2001,7 @@ gso:
>  	 */
>  	rcu_read_lock_bh();
>  
> +	skb->sending_cpu = cpu = smp_processor_id();

There is one more problem: this will break things like act_mirred + ifb,
and other cases using skb->iif e.g. for filtering on virtual devices at
the xmit path.

Jarek P.


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 20:53 ` Jarek Poplawski
@ 2009-11-20 21:35   ` Eric Dumazet
  2009-11-20 21:43     ` Joe Perches
  2009-11-20 22:34     ` David Miller
  0 siblings, 2 replies; 28+ messages in thread
From: Eric Dumazet @ 2009-11-20 21:35 UTC (permalink / raw)
  To: Jarek Poplawski; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

Jarek Poplawski wrote:
> 
> There is one more problem: this will break things like act_mirred + ifb,
> and other cases using skb->iif e.g. for filtering on virtual devices at
> the xmit path.
> 

The following patch might help us locate real uses of this obscure field :)

[PATCH net-next-2.6] net: rename skb->iif to skb->skb_iif

To help grep games, rename iif to skb_iif

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
---
 drivers/net/ifb.c                 |    6 +++---
 include/linux/skbuff.h            |    4 ++--
 include/net/pkt_cls.h             |    4 ++--
 net/core/dev.c                    |    6 +++---
 net/core/skbuff.c                 |    2 +-
 net/netlabel/netlabel_unlabeled.c |    2 +-
 net/sched/act_mirred.c            |    2 +-
 net/sched/cls_flow.c              |    2 +-
 security/selinux/hooks.c          |    6 +++---
 security/smack/smack_lsm.c        |    4 ++--
 10 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 69c2566..f4081c0 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -99,7 +99,7 @@ static void ri_tasklet(unsigned long dev)
 		stats->tx_bytes +=skb->len;
 
 		rcu_read_lock();
-		skb->dev = dev_get_by_index_rcu(&init_net, skb->iif);
+		skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
 		if (!skb->dev) {
 			rcu_read_unlock();
 			dev_kfree_skb(skb);
@@ -107,7 +107,7 @@ static void ri_tasklet(unsigned long dev)
 			break;
 		}
 		rcu_read_unlock();
-		skb->iif = _dev->ifindex;
+		skb->skb_iif = _dev->ifindex;
 
 		if (from & AT_EGRESS) {
 			dp->st_rx_frm_egr++;
@@ -172,7 +172,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 	stats->rx_packets++;
 	stats->rx_bytes+=skb->len;
 
-	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->iif) {
+	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
 		dev_kfree_skb(skb);
 		stats->rx_dropped++;
 		return NETDEV_TX_OK;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 63f4742..89eed8c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -299,7 +299,7 @@ typedef unsigned char *sk_buff_data_t;
  *	@nfctinfo: Relationship of this skb to the connection
  *	@nfct_reasm: netfilter conntrack re-assembly pointer
  *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
- *	@iif: ifindex of device we arrived on
+ *	@skb_iif: ifindex of device we arrived on
  *	@queue_mapping: Queue mapping for multiqueue devices
  *	@tc_index: Traffic control index
  *	@tc_verd: traffic control verdict
@@ -366,7 +366,7 @@ struct sk_buff {
 	struct nf_bridge_info	*nf_bridge;
 #endif
 
-	int			iif;
+	int			skb_iif;
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
 #ifdef CONFIG_NET_CLS_ACT
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 3dd210d..dd3031a 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -343,9 +343,9 @@ tcf_match_indev(struct sk_buff *skb, char *indev)
 	struct net_device *dev;
 
 	if (indev[0]) {
-		if  (!skb->iif)
+		if  (!skb->skb_iif)
 			return 0;
-		dev = __dev_get_by_index(dev_net(skb->dev), skb->iif);
+		dev = __dev_get_by_index(dev_net(skb->dev), skb->skb_iif);
 		if (!dev || strcmp(indev, dev->name))
 			return 0;
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index 9977288..09f3d6b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2287,7 +2287,7 @@ static int ing_filter(struct sk_buff *skb)
 	if (MAX_RED_LOOP < ttl++) {
 		printk(KERN_WARNING
 		       "Redir loop detected Dropping packet (%d->%d)\n",
-		       skb->iif, dev->ifindex);
+		       skb->skb_iif, dev->ifindex);
 		return TC_ACT_SHOT;
 	}
 
@@ -2395,8 +2395,8 @@ int netif_receive_skb(struct sk_buff *skb)
 	if (netpoll_receive_skb(skb))
 		return NET_RX_DROP;
 
-	if (!skb->iif)
-		skb->iif = skb->dev->ifindex;
+	if (!skb->skb_iif)
+		skb->skb_iif = skb->dev->ifindex;
 
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 739b8f4..bfa3e78 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -549,7 +549,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
 	new->protocol		= old->protocol;
 	new->mark		= old->mark;
-	new->iif		= old->iif;
+	new->skb_iif		= old->skb_iif;
 	__nf_copy(new, old);
 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 3dfe2ba..98ed22e 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1550,7 +1550,7 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
 	struct netlbl_unlhsh_iface *iface;
 
 	rcu_read_lock();
-	iface = netlbl_unlhsh_search_iface_def(skb->iif);
+	iface = netlbl_unlhsh_search_iface_def(skb->skb_iif);
 	if (iface == NULL)
 		goto unlabel_getattr_nolabel;
 	switch (family) {
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 7974793..d329170 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -185,7 +185,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
 		skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);
 
 	skb2->dev = dev;
-	skb2->iif = skb->dev->ifindex;
+	skb2->skb_iif = skb->dev->ifindex;
 	dev_queue_xmit(skb2);
 	err = 0;
 
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 9402a7f..e054c62 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -171,7 +171,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb)
 
 static u32 flow_get_iif(const struct sk_buff *skb)
 {
-	return skb->iif;
+	return skb->skb_iif;
 }
 
 static u32 flow_get_priority(const struct sk_buff *skb)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index bb230d5..83a4aad 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4085,7 +4085,7 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
 	char *addrp;
 
 	COMMON_AUDIT_DATA_INIT(&ad, NET);
-	ad.u.net.netif = skb->iif;
+	ad.u.net.netif = skb->skb_iif;
 	ad.u.net.family = family;
 	err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
 	if (err)
@@ -4147,7 +4147,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		return 0;
 
 	COMMON_AUDIT_DATA_INIT(&ad, NET);
-	ad.u.net.netif = skb->iif;
+	ad.u.net.netif = skb->skb_iif;
 	ad.u.net.family = family;
 	err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
 	if (err)
@@ -4159,7 +4159,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
 		if (err)
 			return err;
-		err = selinux_inet_sys_rcv_skb(skb->iif, addrp, family,
+		err = selinux_inet_sys_rcv_skb(skb->skb_iif, addrp, family,
 					       peer_sid, &ad);
 		if (err) {
 			selinux_netlbl_err(skb, err, 0);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index c33b6bb..529c9ca 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -2602,7 +2602,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
 #ifdef CONFIG_AUDIT
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
 	ad.a.u.net.family = sk->sk_family;
-	ad.a.u.net.netif = skb->iif;
+	ad.a.u.net.netif = skb->skb_iif;
 	ipv4_skb_to_auditdata(skb, &ad.a, NULL);
 #endif
 	/*
@@ -2757,7 +2757,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
 #ifdef CONFIG_AUDIT
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
 	ad.a.u.net.family = family;
-	ad.a.u.net.netif = skb->iif;
+	ad.a.u.net.netif = skb->skb_iif;
 	ipv4_skb_to_auditdata(skb, &ad.a, NULL);
 #endif
 	/*


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 21:35   ` Eric Dumazet
@ 2009-11-20 21:43     ` Joe Perches
  2009-11-20 21:49       ` David Miller
  2009-11-20 22:01       ` Eric Dumazet
  2009-11-20 22:34     ` David Miller
  1 sibling, 2 replies; 28+ messages in thread
From: Joe Perches @ 2009-11-20 21:43 UTC (permalink / raw)
  To: Eric Dumazet
  Cc: Jarek Poplawski, David S. Miller, Tom Herbert, Linux Netdev List

On Fri, 2009-11-20 at 22:35 +0100, Eric Dumazet wrote:
> Jarek Poplawski wrote:
> > There is one more problem: this will break things like act_mirred + ifb,
> > and other cases using skb->iif e.g. for filtering on virtual devices at
> > the xmit path.
> Following patch might help us to locate real uses of this obscure field :)
> [PATCH net-next-2.6] net: rename skb->iif to skb->skb_iif

Prefixing member names generally doesn't end well.
Prefixing selected member names? ick.



* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 20:04     ` Jarek Poplawski
@ 2009-11-20 21:43       ` Eric Dumazet
  2009-11-20 22:08         ` Jarek Poplawski
  0 siblings, 1 reply; 28+ messages in thread
From: Eric Dumazet @ 2009-11-20 21:43 UTC (permalink / raw)
  To: Jarek Poplawski; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

Jarek Poplawski wrote:
> On Fri, Nov 20, 2009 at 03:45:42PM +0100, Eric Dumazet wrote:
>> Jarek Poplawski wrote:
>>> On 20-11-2009 00:46, Eric Dumazet wrote:
>>> scheduled) IPIs?
>> Repeat after me :
>>
>> lockdep is my friend, lockdep is my friend, lockdep is my friend... :)
> 
> Hmm... Actually, why did I have to do lockdep's job...

In fact I could not find why irq masking is necessary, and lockdep is
fine with my code and my testing. Care to explain what problem you spotted?


In fact, I originally used  cmpxchg() and xchg() (no spinlock),
but had to find the end of skb list in the IPI handler, so I chose
to use a skb_head instead to let the IPI handler be as short as possible.



* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 21:43     ` Joe Perches
@ 2009-11-20 21:49       ` David Miller
  2009-11-20 22:01       ` Eric Dumazet
  1 sibling, 0 replies; 28+ messages in thread
From: David Miller @ 2009-11-20 21:49 UTC (permalink / raw)
  To: joe; +Cc: eric.dumazet, jarkao2, therbert, netdev

From: Joe Perches <joe@perches.com>
Date: Fri, 20 Nov 2009 13:43:34 -0800

> On Fri, 2009-11-20 at 22:35 +0100, Eric Dumazet wrote:
>> Jarek Poplawski wrote:
>> > There is one more problem: this will break things like act_mirred + ifb,
>> > and other cases using skb->iif e.g. for filtering on virtual devices at
>> > the xmit path.
>> Following patch might help us to locate real uses of this obscure field :)
>> [PATCH net-next-2.6] net: rename skb->iif to skb->skb_iif
> 
> Prefixing member names generally doesn't end well.
> Prefixing selected member names? ick.

But I highly encourage what Eric is doing; I hate having to grep the
tree very carefully just to find uses of a certain SKB struct member.

And since SKB usage covers so many thousands of files in the tree,
doing it one member at a time like Eric is doing here really can make sense.


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 21:43     ` Joe Perches
  2009-11-20 21:49       ` David Miller
@ 2009-11-20 22:01       ` Eric Dumazet
  1 sibling, 0 replies; 28+ messages in thread
From: Eric Dumazet @ 2009-11-20 22:01 UTC (permalink / raw)
  To: Joe Perches
  Cc: Jarek Poplawski, David S. Miller, Tom Herbert, Linux Netdev List

Joe Perches wrote:
> 
> Prefixing member names generally doesn't end well.
> Prefixing selected member names? ick.

Yes, but Rome was not built in one day my friend :)

This patch makes Jarek's point more obvious: Don't reuse iif or risk
new bugs.


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 21:43       ` Eric Dumazet
@ 2009-11-20 22:08         ` Jarek Poplawski
  2009-11-20 22:21           ` Eric Dumazet
  0 siblings, 1 reply; 28+ messages in thread
From: Jarek Poplawski @ 2009-11-20 22:08 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

On Fri, Nov 20, 2009 at 10:43:55PM +0100, Eric Dumazet wrote:
> Jarek Poplawski wrote:
> > On Fri, Nov 20, 2009 at 03:45:42PM +0100, Eric Dumazet wrote:
>> Jarek Poplawski wrote:
> >>> On 20-11-2009 00:46, Eric Dumazet wrote:
> >>> scheduled) IPIs?
> >> Repeat after me :
> >>
> >> lockdep is my friend, lockdep is my friend, lockdep is my friend... :)
> > 
> > Hmm... Actually, why did I have to do lockdep's job...
> 
> In fact I could not find why irq masking is necessary, and lockdep is
> fine with my code and my testings. Care to explain what problem you spotted ?
> 

CPU1						 CPU2
net_rx_action()					 net_rx_action()
 xps_flush()				  	  xps_flush()
  q = &per_cpu(xps_pcpu_queue, cpu2)		   q = &per_cpu(xps_pcpu_queue, cpu1)
  spin_lock(&q->list.lock of cpu2)		   spin_lock(&q->list.lock of cpu1)

<IPI>						 <IPI>
remote_free_skb_list()				 remote_free_skb_list()
waiting on spin_lock(&q->list.lock of cpu1)	 waiting on spin_lock(&q->list.lock of cpu2)


IPIs triggered e.g. by CPU3 (and/or CPU4...) doing net_rx_action as well.
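
For illustration, the irq-safe splice this scenario calls for could look like the
sketch below (reusing the names from the patch; nothing here is a final proposal):

static void xps_splice_to(int cpu, struct sk_buff_head *local)
{
	struct xps_pcpu_queue *q = &per_cpu(xps_pcpu_queue, cpu);
	unsigned long flags;
	int prevlen;

	/* with irqs off we cannot be interrupted by a pending IPI while
	 * holding a remote cpu's list lock, so the AB-BA wait above
	 * cannot happen
	 */
	spin_lock_irqsave(&q->list.lock, flags);
	prevlen = skb_queue_len(&q->list);
	skb_queue_splice_init(local, &q->list);
	spin_unlock_irqrestore(&q->list.lock, flags);

	if (!prevlen)
		__smp_call_function_single(cpu, &q->csd, 0);
}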

Jarek P.


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 22:08         ` Jarek Poplawski
@ 2009-11-20 22:21           ` Eric Dumazet
  0 siblings, 0 replies; 28+ messages in thread
From: Eric Dumazet @ 2009-11-20 22:21 UTC (permalink / raw)
  To: Jarek Poplawski; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

Jarek Poplawski wrote:
> On Fri, Nov 20, 2009 at 10:43:55PM +0100, Eric Dumazet wrote:
>>> Hmm... Actually, why did I have to do lockdep's job...
>> In fact I could not find why irq masking is necessary, and lockdep is
>> fine with my code and my testings. Care to explain what problem you spotted ?
>>
> 
> CPU1						 CPU2
> net_rx_action()					 net_rx_action()
>  xps_flush()				  	  xps_flush()
>   q = &per_cpu(xps_pcpu_queue, cpu2)		   q = &per_cpu(xps_pcpu_queue, cpu1)
>   spin_lock(&q->list.lock of cpu2)		   spin_lock(&q->list.lock of cpu1)
> 
> <IPI>						 <IPI>
> remote_free_skb_list()				 remote_free_skb_list()
> waiting on spin_lock(&q->list.lock of cpu1)	 waiting on spin_lock(&q->list.lock of cpu2)
> 
> 
> IPIs triggerered e.g. by CPU3 (or/and CPU4...) doing net_rx_action as well.
> 

:) Now I am convinced :)

Thanks, Jarek




* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 20:51 ` Andi Kleen
  2009-11-20 20:53   ` David Miller
@ 2009-11-20 22:30   ` Eric Dumazet
  2009-11-20 22:37     ` Andi Kleen
  1 sibling, 1 reply; 28+ messages in thread
From: Eric Dumazet @ 2009-11-20 22:30 UTC (permalink / raw)
  To: Andi Kleen; +Cc: David S. Miller, Tom Herbert, Linux Netdev List

Andi Kleen wrote:
> 
> Do you have numbers on this? It seems like a lot of effort to avoid transfering
> a few cache lines.

A lot of effort? Hmm...

> 
> -Andi (who is a bit sceptical and would rather see generic work for this in slab)
> 

Yes, I know, but slab/slub is already quite optimized :)

Coding XPS was more a way to try to help push RPS in; I did not benchmark it, BTW.

My new dev machine is up, with two E5530 cpus and a 82599EB 10-Gigabit dual port card :
Pretty amazing :=)



* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-19 23:46 [PATCH net-next-2.6] net: Xmit Packet Steering (XPS) Eric Dumazet
                   ` (4 preceding siblings ...)
  2009-11-20 20:53 ` Jarek Poplawski
@ 2009-11-20 22:32 ` David Miller
  2009-11-20 22:36   ` Eric Dumazet
  5 siblings, 1 reply; 28+ messages in thread
From: David Miller @ 2009-11-20 22:32 UTC (permalink / raw)
  To: eric.dumazet; +Cc: therbert, netdev

From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Fri, 20 Nov 2009 00:46:36 +0100

> Goal of XPS is to free TX completed skbs by the cpu that submitted
> the transmit.
> 
> Because I chose to union skb->iif with skb->sending_cpu, I chose
> to introduce a new xps_consume_skb(skb), and not generalize consume_skb() itself.
> 
> This means that selected drivers must use new function to benefit from XPS
> 
> Preliminary tests are quite good, especially on NUMA machines.
> 
> Only NAPI drivers can use this new infrastructure (xps_consume_skb() cannot
> be called from hardirq context, only from softirq)
> 
> I converted tg3 and pktgen for my tests
> 
> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>

I like this work.  But as you know it still needs a little bit
more work :-)

Let's also pick a more decent name for the free function since
tons of drivers are going to call this thing.  How about
dev_kfree_tx_skb()? :-)

I see Jarek and you have come to a mutual understanding about the
locking.  Since you need to change it anyway to fix the deadlock,
what about using a netchannel-like scheme to do remote SKB queueing?

PAGE_SIZE queue arrays, lockless access to head and tail pointers, and
if the queue is full we free locally.

I think that's a reasonable policy and the only detail to work out is
to make sure we never race on the IPI send and thus miss processing
the queue.

What do you think?
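
Purely to illustrate that scheme (names and sizes below are assumptions, not a
concrete proposal): one single-producer/single-consumer ring per (completing cpu,
sending cpu) pair, with the completing cpu as producer and a local free on overflow.

#define XPS_RING_SLOTS	(PAGE_SIZE / sizeof(struct sk_buff *))	/* power of two */

struct xps_ring {
	unsigned int	head;	/* written only by the producer (completing cpu) */
	unsigned int	tail;	/* written only by the consumer (sending cpu) */
	struct sk_buff	*skb[XPS_RING_SLOTS];
};

/* producer side: returns false when the ring is full, caller frees locally */
static bool xps_ring_put(struct xps_ring *r, struct sk_buff *skb)
{
	unsigned int head = r->head;

	if (head - r->tail >= XPS_RING_SLOTS)
		return false;
	r->skb[head & (XPS_RING_SLOTS - 1)] = skb;
	smp_wmb();			/* publish the slot before advancing head */
	r->head = head + 1;
	return true;
}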



* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 21:35   ` Eric Dumazet
  2009-11-20 21:43     ` Joe Perches
@ 2009-11-20 22:34     ` David Miller
  1 sibling, 0 replies; 28+ messages in thread
From: David Miller @ 2009-11-20 22:34 UTC (permalink / raw)
  To: eric.dumazet; +Cc: jarkao2, therbert, netdev

From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Fri, 20 Nov 2009 22:35:40 +0100

> Jarek Poplawski wrote:
>> 
>> There is one more problem: this will break things like act_mirred + ifb,
>> and other cases using skb->iif e.g. for filtering on virtual devices at
>> the xmit path.
>> 
> 
> Following patch might help us to locate real uses of this obscure field :)
> 
> [PATCH net-next-2.6] net: rename skb->iif to skb->skb_iif
> 
> To help grep games, rename iif to skb_iif
> 
> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>

Applied, thanks Eric.


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 22:32 ` David Miller
@ 2009-11-20 22:36   ` Eric Dumazet
  0 siblings, 0 replies; 28+ messages in thread
From: Eric Dumazet @ 2009-11-20 22:36 UTC (permalink / raw)
  To: David Miller; +Cc: therbert, netdev

David Miller wrote:
> 
> I like this work.  But as you know it still needs a little bit
> more work :-)
> 
> Let's also pick a more decent name for the free function since
> tons of drivers are going to call this thing.  How about
> dev_kfree_tx_skb()? :-)
> 
> I see Jarek and you have come to a mutual understanding about the
> locking.  Since you need to change it anyways to fix the deadlock,
> what using a netchannel like scheme to do remote SKB queueing?
> 
> PAGE_SIZE queue arrays, lockless access to head and tail pointers, and
> if queue is full we local free.
> 
> I think that's a reasonable policy and the only detail to work out is
> to make sure we never race on the IPI send and thus miss processing
> the queue.
> 
> What do you think?
> 

Those are good ideas, David; I'll work on them next week and do benchmarks as well
before sending a new version.

Thanks


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
  2009-11-20 22:30   ` Eric Dumazet
@ 2009-11-20 22:37     ` Andi Kleen
       [not found]       ` <65634d660911201642k3930dc78vd576e0e89dc0c794@mail.gmail.com>
  0 siblings, 1 reply; 28+ messages in thread
From: Andi Kleen @ 2009-11-20 22:37 UTC (permalink / raw)
  To: Eric Dumazet; +Cc: Andi Kleen, David S. Miller, Tom Herbert, Linux Netdev List

On Fri, Nov 20, 2009 at 11:30:36PM +0100, Eric Dumazet wrote:
> Andi Kleen wrote:
> > 
> > Do you have numbers on this? It seems like a lot of effort to avoid transfering
> > a few cache lines.
> 
> Lot of efforts ? hmm...

Well, lots of code at least. Perhaps I'm old-fashioned, but I always like to have
each piece of code justify its complexity.

It seems like a very narrow special case. Or perhaps this is something that
should be a general library function for all allocator users?

> Yes, I know, but slab/slub is already quite optimized :)

Well, it still has a lot of problems; other benchmarks suffer too.

-Andi
-- 
ak@linux.intel.com -- Speaking for myself only.


* Re: [PATCH net-next-2.6] net: Xmit Packet Steering (XPS)
       [not found]       ` <65634d660911201642k3930dc78vd576e0e89dc0c794@mail.gmail.com>
@ 2009-11-21  6:58         ` Eric Dumazet
  0 siblings, 0 replies; 28+ messages in thread
From: Eric Dumazet @ 2009-11-21  6:58 UTC (permalink / raw)
  To: Tom Herbert; +Cc: Andi Kleen, David S. Miller, Linux Netdev List

Tom Herbert wrote:
> 
> It probably is a special case, but addresses a real problem.  Like rps,
> this addresses the case where a single queue NIC is the bottleneck on a
> system.  Anything that takes work off the interrupting core and spreads
> it to other cores may alleviate the bottleneck. 
> 


Yes, and this brings back an idea I mentioned earlier in April, for a multicast
problem we had with AthenaCR.

XPS could be 'started' only if the caller runs from ksoftirqd, e.g. when we are in a stress
situation, and the additional cost of XPS is nothing compared to the huge gains we get
from spreading part of the work to more CPUs.

(Costs of XPS include: ~500 bytes of ICACHE, and an IPI)
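
A rough sketch of that gating, just to illustrate (in_ksoftirqd() and
xps_queue_for_sender() are hypothetical helpers, e.g. comparing current with this
cpu's ksoftirqd task and doing the queue + xps_cpus step of the patch):

void xps_consume_skb(struct sk_buff *skb)
{
	if (!atomic_dec_and_test(&skb->users))
		return;

	/* not under softirq stress, or nothing to steer: free locally */
	if (!in_ksoftirqd() || skb->sending_cpu >= nr_cpu_ids ||
	    skb->sending_cpu == smp_processor_id()) {
		__kfree_skb(skb);
		return;
	}
	xps_queue_for_sender(skb);
}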



