From: Eric Dumazet <eric.dumazet@gmail.com>
To: David Miller <davem@davemloft.net>
Cc: xiaosuo@gmail.com, therbert@google.com, netdev@vger.kernel.org
Subject: [PATCH net-next-2.6] rps: cleanups
Date: Tue, 20 Apr 2010 09:17:14 +0200
Message-ID: <1271747834.3845.206.camel@edumazet-laptop>
In-Reply-To: <20100419.132158.143863746.davem@davemloft.net>

On Monday, 19 April 2010 at 13:21 -0700, David Miller wrote:

> 
> It is getting increasingly complicated to follow who enables and
> disables local cpu irqs in these code paths.  We could combat
> this by adding something like "_irq_enable()" to the function
> names.

Yes, I agree, we need a general cleanup in this file.

Thanks, David!

[PATCH net-next-2.6] rps: cleanups

struct softnet_data holds many queues, so consistently using the name
"sd" instead of "queue" is better.

Adds an rps_ipi_queued() helper to clean up enqueue_to_backlog()

Adds the _and_irq_disable suffix to the net_rps_action() name, as David
suggested.

incr_input_queue_head() becomes input_queue_head_incr()

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
---
 include/linux/netdevice.h |    4 
 net/core/dev.c            |  149 +++++++++++++++++++-----------------
 2 files changed, 82 insertions(+), 71 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 83ab3da..3c5ed5f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1401,10 +1401,10 @@ struct softnet_data {
 	struct napi_struct	backlog;
 };
 
-static inline void incr_input_queue_head(struct softnet_data *queue)
+static inline void input_queue_head_incr(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	queue->input_queue_head++;
+	sd->input_queue_head++;
 #endif
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 05a2b29..70df048 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -208,17 +208,17 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 }
 
-static inline void rps_lock(struct softnet_data *queue)
+static inline void rps_lock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_lock(&queue->input_pkt_queue.lock);
+	spin_lock(&sd->input_pkt_queue.lock);
 #endif
 }
 
-static inline void rps_unlock(struct softnet_data *queue)
+static inline void rps_unlock(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	spin_unlock(&queue->input_pkt_queue.lock);
+	spin_unlock(&sd->input_pkt_queue.lock);
 #endif
 }
 
@@ -2346,63 +2346,74 @@ done:
 }
 
 /* Called from hardirq (IPI) context */
-static void trigger_softirq(void *data)
+static void rps_trigger_softirq(void *data)
 {
-	struct softnet_data *queue = data;
-	__napi_schedule(&queue->backlog);
+	struct softnet_data *sd = data;
+
+	__napi_schedule(&sd->backlog);
 	__get_cpu_var(netdev_rx_stat).received_rps++;
 }
+
 #endif /* CONFIG_RPS */
 
 /*
+ * Check if this softnet_data structure is another cpu one
+ * If yes, queue it to our IPI list and return 1
+ * If no, return 0
+ */
+static int rps_ipi_queued(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+	struct softnet_data *mysd = &__get_cpu_var(softnet_data);
+
+	if (sd != mysd) {
+		sd->rps_ipi_next = mysd->rps_ipi_list;
+		mysd->rps_ipi_list = sd;
+
+		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+		return 1;
+	}
+#endif /* CONFIG_RPS */
+	return 0;
+}
+
+/*
  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
  * queue (may be a remote CPU queue).
  */
 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 			      unsigned int *qtail)
 {
-	struct softnet_data *queue;
+	struct softnet_data *sd;
 	unsigned long flags;
 
-	queue = &per_cpu(softnet_data, cpu);
+	sd = &per_cpu(softnet_data, cpu);
 
 	local_irq_save(flags);
 	__get_cpu_var(netdev_rx_stat).total++;
 
-	rps_lock(queue);
-	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
-		if (queue->input_pkt_queue.qlen) {
+	rps_lock(sd);
+	if (sd->input_pkt_queue.qlen <= netdev_max_backlog) {
+		if (sd->input_pkt_queue.qlen) {
 enqueue:
-			__skb_queue_tail(&queue->input_pkt_queue, skb);
+			__skb_queue_tail(&sd->input_pkt_queue, skb);
 #ifdef CONFIG_RPS
-			*qtail = queue->input_queue_head +
-			    queue->input_pkt_queue.qlen;
+			*qtail = sd->input_queue_head + sd->input_pkt_queue.qlen;
 #endif
-			rps_unlock(queue);
+			rps_unlock(sd);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
 		}
 
 		/* Schedule NAPI for backlog device */
-		if (napi_schedule_prep(&queue->backlog)) {
-#ifdef CONFIG_RPS
-			if (cpu != smp_processor_id()) {
-				struct softnet_data *myqueue;
-
-				myqueue = &__get_cpu_var(softnet_data);
-				queue->rps_ipi_next = myqueue->rps_ipi_list;
-				myqueue->rps_ipi_list = queue;
-
-				__raise_softirq_irqoff(NET_RX_SOFTIRQ);
-				goto enqueue;
-			}
-#endif
-			__napi_schedule(&queue->backlog);
+		if (napi_schedule_prep(&sd->backlog)) {
+			if (!rps_ipi_queued(sd))
+				__napi_schedule(&sd->backlog);
 		}
 		goto enqueue;
 	}
 
-	rps_unlock(queue);
+	rps_unlock(sd);
 
 	__get_cpu_var(netdev_rx_stat).dropped++;
 	local_irq_restore(flags);
@@ -2903,17 +2914,17 @@ EXPORT_SYMBOL(netif_receive_skb);
 static void flush_backlog(void *arg)
 {
 	struct net_device *dev = arg;
-	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = &__get_cpu_var(softnet_data);
 	struct sk_buff *skb, *tmp;
 
-	rps_lock(queue);
-	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+	rps_lock(sd);
+	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp)
 		if (skb->dev == dev) {
-			__skb_unlink(skb, &queue->input_pkt_queue);
+			__skb_unlink(skb, &sd->input_pkt_queue);
 			kfree_skb(skb);
-			incr_input_queue_head(queue);
+			input_queue_head_incr(sd);
 		}
-	rps_unlock(queue);
+	rps_unlock(sd);
 }
 
 static int napi_gro_complete(struct sk_buff *skb)
@@ -3219,23 +3230,23 @@ EXPORT_SYMBOL(napi_gro_frags);
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
-	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = &__get_cpu_var(softnet_data);
 
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
 
 		local_irq_disable();
-		rps_lock(queue);
-		skb = __skb_dequeue(&queue->input_pkt_queue);
+		rps_lock(sd);
+		skb = __skb_dequeue(&sd->input_pkt_queue);
 		if (!skb) {
 			__napi_complete(napi);
-			rps_unlock(queue);
+			rps_unlock(sd);
 			local_irq_enable();
 			break;
 		}
-		incr_input_queue_head(queue);
-		rps_unlock(queue);
+		input_queue_head_incr(sd);
+		rps_unlock(sd);
 		local_irq_enable();
 
 		__netif_receive_skb(skb);
@@ -3331,24 +3342,25 @@ EXPORT_SYMBOL(netif_napi_del);
  * net_rps_action sends any pending IPI's for rps.
  * Note: called with local irq disabled, but exits with local irq enabled.
  */
-static void net_rps_action(void)
+static void net_rps_action_and_irq_disable(void)
 {
 #ifdef CONFIG_RPS
-	struct softnet_data *locqueue = &__get_cpu_var(softnet_data);
-	struct softnet_data *remqueue = locqueue->rps_ipi_list;
+	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *remsd = sd->rps_ipi_list;
 
-	if (remqueue) {
-		locqueue->rps_ipi_list = NULL;
+	if (remsd) {
+		sd->rps_ipi_list = NULL;
 
 		local_irq_enable();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
-		while (remqueue) {
-			struct softnet_data *next = remqueue->rps_ipi_next;
-			if (cpu_online(remqueue->cpu))
-				__smp_call_function_single(remqueue->cpu,
-							   &remqueue->csd, 0);
-			remqueue = next;
+		while (remsd) {
+			struct softnet_data *next = remsd->rps_ipi_next;
+
+			if (cpu_online(remsd->cpu))
+				__smp_call_function_single(remsd->cpu,
+							   &remsd->csd, 0);
+			remsd = next;
 		}
 	} else
 #endif
@@ -3423,7 +3435,7 @@ static void net_rx_action(struct softirq_action *h)
 		netpoll_poll_unlock(have);
 	}
 out:
-	net_rps_action();
+	net_rps_action_and_irq_disable();
 
 #ifdef CONFIG_NET_DMA
 	/*
@@ -5595,7 +5607,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
 		netif_rx(skb);
-		incr_input_queue_head(oldsd);
+		input_queue_head_incr(oldsd);
 	}
 
 	return NOTIFY_OK;
@@ -5812,24 +5824,23 @@ static int __init net_dev_init(void)
 	 */
 
 	for_each_possible_cpu(i) {
-		struct softnet_data *queue;
+		struct softnet_data *sd = &per_cpu(softnet_data, i);
 
-		queue = &per_cpu(softnet_data, i);
-		skb_queue_head_init(&queue->input_pkt_queue);
-		queue->completion_queue = NULL;
-		INIT_LIST_HEAD(&queue->poll_list);
+		skb_queue_head_init(&sd->input_pkt_queue);
+		sd->completion_queue = NULL;
+		INIT_LIST_HEAD(&sd->poll_list);
 
 #ifdef CONFIG_RPS
-		queue->csd.func = trigger_softirq;
-		queue->csd.info = queue;
-		queue->csd.flags = 0;
-		queue->cpu = i;
+		sd->csd.func = rps_trigger_softirq;
+		sd->csd.info = sd;
+		sd->csd.flags = 0;
+		sd->cpu = i;
 #endif
 
-		queue->backlog.poll = process_backlog;
-		queue->backlog.weight = weight_p;
-		queue->backlog.gro_list = NULL;
-		queue->backlog.gro_count = 0;
+		sd->backlog.poll = process_backlog;
+		sd->backlog.weight = weight_p;
+		sd->backlog.gro_list = NULL;
+		sd->backlog.gro_count = 0;
 	}
 
 	dev_boot_phase = 0;
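
For anyone who wants to play with the pattern outside the kernel, here is a
minimal user-space sketch of what rps_ipi_queued() and the later IPI flush
do: remote backlogs get chained on the local cpu's pending list instead of
being scheduled inline, then the list is walked once at the end. The names
below (fake_sd, ipi_queued, flush_ipi_list) are made up for illustration and
are not the kernel structures or functions.

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for struct softnet_data: just the fields the pattern needs. */
struct fake_sd {
	int cpu;                  /* cpu owning this backlog           */
	struct fake_sd *ipi_next; /* link in the local cpu's IPI list  */
	struct fake_sd *ipi_list; /* head of pending remote sd's       */
};

static struct fake_sd per_cpu_sd[NR_CPUS];
static int this_cpu;              /* stand-in for smp_processor_id()   */

/*
 * Mirror of rps_ipi_queued(): if sd belongs to another cpu, chain it on
 * the local cpu's pending list and return 1 so the caller skips the
 * local schedule; otherwise return 0.
 */
static int ipi_queued(struct fake_sd *sd)
{
	struct fake_sd *mysd = &per_cpu_sd[this_cpu];

	if (sd != mysd) {
		sd->ipi_next = mysd->ipi_list;
		mysd->ipi_list = sd;
		return 1;
	}
	return 0;
}

/* Mirror of the IPI flush: walk the pending list once and clear it. */
static void flush_ipi_list(void)
{
	struct fake_sd *mysd = &per_cpu_sd[this_cpu];
	struct fake_sd *remsd = mysd->ipi_list;

	mysd->ipi_list = NULL;
	while (remsd) {
		struct fake_sd *next = remsd->ipi_next;

		printf("would IPI cpu %d to run its backlog\n", remsd->cpu);
		remsd = next;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		per_cpu_sd[i].cpu = i;

	this_cpu = 0;
	/* Packets steered to remote cpus 2 and 3: queued for a later IPI. */
	ipi_queued(&per_cpu_sd[2]);
	ipi_queued(&per_cpu_sd[3]);
	/* Packet for the local cpu: handled directly. */
	if (!ipi_queued(&per_cpu_sd[0]))
		printf("schedule the local backlog on cpu 0\n");

	flush_ipi_list();
	return 0;
}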




Thread overview: 86+ messages
2010-02-07 18:42 rps: question jamal
2010-02-08  5:58 ` Tom Herbert
2010-02-08 15:09   ` jamal
2010-04-14 11:53     ` rps perfomance WAS(Re: " jamal
2010-04-14 17:31       ` Tom Herbert
2010-04-14 18:04         ` Eric Dumazet
2010-04-14 18:53           ` jamal
2010-04-14 19:44             ` Stephen Hemminger
2010-04-14 19:58               ` Eric Dumazet
2010-04-15  8:51                 ` David Miller
2010-04-14 20:22               ` jamal
2010-04-14 20:27                 ` Eric Dumazet
2010-04-14 20:38                   ` jamal
2010-04-14 20:45                   ` Tom Herbert
2010-04-14 20:57                     ` Eric Dumazet
2010-04-14 22:51                       ` Changli Gao
2010-04-14 23:02                         ` Stephen Hemminger
2010-04-15  2:40                           ` Eric Dumazet
2010-04-15  2:50                             ` Changli Gao
2010-04-15  8:57                       ` David Miller
2010-04-15 12:10                       ` jamal
2010-04-15 12:32                         ` Changli Gao
2010-04-15 12:50                           ` jamal
2010-04-15 23:51                             ` Changli Gao
2010-04-15  8:51                 ` David Miller
2010-04-14 20:34               ` Andi Kleen
2010-04-15  8:50               ` David Miller
2010-04-15  8:48             ` David Miller
2010-04-15 11:55               ` jamal
2010-04-15 16:41                 ` Rick Jones
2010-04-15 20:16                   ` jamal
2010-04-15 20:25                     ` Rick Jones
2010-04-15 23:56                     ` Changli Gao
2010-04-16  5:18                       ` Eric Dumazet
2010-04-16  6:02                         ` Changli Gao
2010-04-16  6:28                           ` Tom Herbert
2010-04-16  6:32                           ` Eric Dumazet
2010-04-16 13:42                             ` jamal
2010-04-16  7:15                           ` Andi Kleen
2010-04-16 13:27                             ` jamal
2010-04-16 13:37                               ` Andi Kleen
2010-04-16 13:58                                 ` jamal
2010-04-16 13:21                         ` jamal
2010-04-16 13:34                           ` Changli Gao
2010-04-16 13:49                             ` jamal
2010-04-16 14:10                               ` Changli Gao
2010-04-16 14:43                                 ` jamal
2010-04-16 14:58                                   ` Changli Gao
2010-04-19 12:48                                     ` jamal
2010-04-17  7:35                           ` Eric Dumazet
2010-04-17  8:43                             ` Tom Herbert
2010-04-17  9:23                               ` Eric Dumazet
2010-04-17 14:27                                 ` Eric Dumazet
2010-04-17 17:26                                   ` Tom Herbert
2010-04-17 14:17                               ` [PATCH net-next-2.6] net: remove time limit in process_backlog() Eric Dumazet
2010-04-18  9:36                                 ` David Miller
2010-04-17 17:31                             ` rps perfomance WAS(Re: rps: question jamal
2010-04-18  9:39                               ` Eric Dumazet
2010-04-18 11:34                                 ` Eric Dumazet
2010-04-19  2:09                                   ` jamal
2010-04-19  9:37                                   ` [RFC] rps: shortcut net_rps_action() Eric Dumazet
2010-04-19  9:48                                     ` Changli Gao
2010-04-19 12:14                                       ` Eric Dumazet
2010-04-19 12:28                                         ` Changli Gao
2010-04-19 13:27                                           ` Eric Dumazet
2010-04-19 14:22                                             ` Eric Dumazet
2010-04-19 15:07                                               ` [PATCH net-next-2.6] " Eric Dumazet
2010-04-19 16:02                                                 ` Tom Herbert
2010-04-19 20:21                                                 ` David Miller
2010-04-20  7:17                                                   ` Eric Dumazet [this message]
2010-04-20  8:18                                                     ` [PATCH net-next-2.6] rps: cleanups David Miller
2010-04-19 23:56                                                 ` [PATCH net-next-2.6] rps: shortcut net_rps_action() Changli Gao
2010-04-20  0:32                                                   ` Changli Gao
2010-04-20  5:55                                                     ` Eric Dumazet
2010-04-20 12:02                                   ` rps perfomance WAS(Re: rps: question jamal
2010-04-20 13:13                                     ` Eric Dumazet
     [not found]                                       ` <1271853570.4032.21.camel@bigi>
2010-04-21 19:01                                         ` Eric Dumazet
2010-04-22  1:27                                           ` Changli Gao
2010-04-22 12:12                                           ` jamal
2010-04-25  2:31                                             ` Changli Gao
2010-04-26 11:35                                               ` jamal
2010-04-26 13:35                                                 ` Changli Gao
2010-04-21 21:53                                         ` Rick Jones
2010-04-16 15:57             ` Tom Herbert
2010-04-14 18:53       ` Stephen Hemminger
2010-04-15  8:42       ` David Miller
