All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH net-next 1/2] tipc: fix excessive network event logging
@ 2015-01-22 16:10 erik.hugne
  2015-01-22 16:10 ` [PATCH net-next 2/2] flow_dissector: add tipc support erik.hugne
                   ` (2 more replies)
  0 siblings, 3 replies; 11+ messages in thread
From: erik.hugne @ 2015-01-22 16:10 UTC (permalink / raw)
  To: richard.alpe, netdev, jon.maloy, ying.xue; +Cc: tipc-discussion, Erik Hugne

From: Erik Hugne <erik.hugne@ericsson.com>

If a large number of namespaces is spawned on a node and TIPC is
enabled in each of these, the excessive printk tracing of network
events will cause the system to grind down to a near halt.
The traces are still of debug value, so instead of removing them
completely we fix it by changing the link state and node availability
logging to debug traces.

Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
---
 net/tipc/link.c | 20 ++++++++++----------
 net/tipc/node.c | 22 +++++++++++-----------
 2 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/net/tipc/link.c b/net/tipc/link.c
index 193bc15..2846ad80 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -538,8 +538,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 			link_set_timer(l_ptr, cont_intv / 4);
 			break;
 		case RESET_MSG:
-			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
-				l_ptr->name);
+			pr_debug("%s<%s>, requested by peer\n",
+				 link_rst_msg, l_ptr->name);
 			tipc_link_reset(l_ptr);
 			l_ptr->state = RESET_RESET;
 			l_ptr->fsm_msg_cnt = 0;
@@ -549,7 +549,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 			link_set_timer(l_ptr, cont_intv);
 			break;
 		default:
-			pr_err("%s%u in WW state\n", link_unk_evt, event);
+			pr_debug("%s%u in WW state\n", link_unk_evt, event);
 		}
 		break;
 	case WORKING_UNKNOWN:
@@ -561,8 +561,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 			link_set_timer(l_ptr, cont_intv);
 			break;
 		case RESET_MSG:
-			pr_info("%s<%s>, requested by peer while probing\n",
-				link_rst_msg, l_ptr->name);
+			pr_debug("%s<%s>, requested by peer while probing\n",
+				 link_rst_msg, l_ptr->name);
 			tipc_link_reset(l_ptr);
 			l_ptr->state = RESET_RESET;
 			l_ptr->fsm_msg_cnt = 0;
@@ -588,8 +588,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 				l_ptr->fsm_msg_cnt++;
 				link_set_timer(l_ptr, cont_intv / 4);
 			} else {	/* Link has failed */
-				pr_warn("%s<%s>, peer not responding\n",
-					link_rst_msg, l_ptr->name);
+				pr_debug("%s<%s>, peer not responding\n",
+					 link_rst_msg, l_ptr->name);
 				tipc_link_reset(l_ptr);
 				l_ptr->state = RESET_UNKNOWN;
 				l_ptr->fsm_msg_cnt = 0;
@@ -1568,9 +1568,9 @@ static void tipc_link_proto_rcv(struct net *net, struct tipc_link *l_ptr,
 
 		if (msg_linkprio(msg) &&
 		    (msg_linkprio(msg) != l_ptr->priority)) {
-			pr_warn("%s<%s>, priority change %u->%u\n",
-				link_rst_msg, l_ptr->name, l_ptr->priority,
-				msg_linkprio(msg));
+			pr_debug("%s<%s>, priority change %u->%u\n",
+				 link_rst_msg, l_ptr->name,
+				 l_ptr->priority, msg_linkprio(msg));
 			l_ptr->priority = msg_linkprio(msg);
 			tipc_link_reset(l_ptr); /* Enforce change to take effect */
 			break;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index b1eb092..ee5d33c 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -230,8 +230,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 	n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
 	n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
 
-	pr_info("Established link <%s> on network plane %c\n",
-		l_ptr->name, l_ptr->net_plane);
+	pr_debug("Established link <%s> on network plane %c\n",
+		 l_ptr->name, l_ptr->net_plane);
 
 	if (!active[0]) {
 		active[0] = active[1] = l_ptr;
@@ -239,7 +239,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 		goto exit;
 	}
 	if (l_ptr->priority < active[0]->priority) {
-		pr_info("New link <%s> becomes standby\n", l_ptr->name);
+		pr_debug("New link <%s> becomes standby\n", l_ptr->name);
 		goto exit;
 	}
 	tipc_link_dup_queue_xmit(active[0], l_ptr);
@@ -247,9 +247,9 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 		active[0] = l_ptr;
 		goto exit;
 	}
-	pr_info("Old link <%s> becomes standby\n", active[0]->name);
+	pr_debug("Old link <%s> becomes standby\n", active[0]->name);
 	if (active[1] != active[0])
-		pr_info("Old link <%s> becomes standby\n", active[1]->name);
+		pr_debug("Old link <%s> becomes standby\n", active[1]->name);
 	active[0] = active[1] = l_ptr;
 exit:
 	/* Leave room for changeover header when returning 'mtu' to users: */
@@ -297,12 +297,12 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 	n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
 
 	if (!tipc_link_is_active(l_ptr)) {
-		pr_info("Lost standby link <%s> on network plane %c\n",
-			l_ptr->name, l_ptr->net_plane);
+		pr_debug("Lost standby link <%s> on network plane %c\n",
+			 l_ptr->name, l_ptr->net_plane);
 		return;
 	}
-	pr_info("Lost link <%s> on network plane %c\n",
-		l_ptr->name, l_ptr->net_plane);
+	pr_debug("Lost link <%s> on network plane %c\n",
+		 l_ptr->name, l_ptr->net_plane);
 
 	active = &n_ptr->active_links[0];
 	if (active[0] == l_ptr)
@@ -380,8 +380,8 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 	char addr_string[16];
 	u32 i;
 
-	pr_info("Lost contact with %s\n",
-		tipc_addr_string_fill(addr_string, n_ptr->addr));
+	pr_debug("Lost contact with %s\n",
+		 tipc_addr_string_fill(addr_string, n_ptr->addr));
 
 	/* Flush broadcast link info associated with lost node */
 	if (n_ptr->bclink.recv_permitted) {
-- 
2.1.3

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH net-next 2/2] flow_dissector: add tipc support
  2015-01-22 16:10 [PATCH net-next 1/2] tipc: fix excessive network event logging erik.hugne
@ 2015-01-22 16:10 ` erik.hugne
  2015-01-22 17:29   ` Eric Dumazet
  2015-01-27  1:04   ` David Miller
  2015-01-22 17:24 ` [PATCH net-next 1/2] tipc: fix excessive network event logging Joe Perches
  2015-01-27  1:04 ` David Miller
  2 siblings, 2 replies; 11+ messages in thread
From: erik.hugne @ 2015-01-22 16:10 UTC (permalink / raw)
  To: richard.alpe, netdev, jon.maloy, ying.xue; +Cc: tipc-discussion

From: Erik Hugne <erik.hugne@ericsson.com>

The flows are hashed on the sending node address, which allows us
to spread out the TIPC link processing to RPS enabled cores. There
is no point to include the destination address in the hash as that
will always be the same for all inbound links. We have experimented
with a 3-tuple hash over [srcnode, sport, dport], but this showed to
give slightly lower performance because of increased lock contention
when the same link was handled by multiple cores.

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
---
 net/core/flow_dissector.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 4508493..beb83d1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -178,6 +178,20 @@ ipv6:
 			return false;
 		}
 	}
+	case htons(ETH_P_TIPC): {
+		struct {
+			__be32 pre[3];
+			__be32 srcnode;
+		} *hdr, _hdr;
+		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
+		if (!hdr)
+			return false;
+		flow->src = hdr->srcnode;
+		flow->dst = 0;
+		flow->n_proto = proto;
+		flow->thoff = (u16)nhoff;
+		return true;
+	}
 	case htons(ETH_P_FCOE):
 		flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
 		/* fall through */
-- 
2.1.3


------------------------------------------------------------------------------
New Year. New Location. New Benefits. New Data Center in Ashburn, VA.
GigeNET is offering a free month of service with a new server in Ashburn.
Choose from 2 high performing configs, both with 100TB of bandwidth.
Higher redundancy. Lower latency. Increased capacity. Completely compliant.
http://p.sf.net/sfu/gigenet

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH net-next 1/2] tipc: fix excessive network event logging
  2015-01-22 16:10 [PATCH net-next 1/2] tipc: fix excessive network event logging erik.hugne
  2015-01-22 16:10 ` [PATCH net-next 2/2] flow_dissector: add tipc support erik.hugne
@ 2015-01-22 17:24 ` Joe Perches
  2015-01-23  6:44   ` Erik Hugne
  2015-01-27  1:04 ` David Miller
  2 siblings, 1 reply; 11+ messages in thread
From: Joe Perches @ 2015-01-22 17:24 UTC (permalink / raw)
  To: erik.hugne; +Cc: richard.alpe, netdev, jon.maloy, ying.xue, tipc-discussion

On Thu, 2015-01-22 at 17:10 +0100, erik.hugne@ericsson.com wrote:
> From: Erik Hugne <erik.hugne@ericsson.com>
> 
> If a large number of namespaces is spawned on a node and TIPC is
> enabled in each of these, the excessive printk tracing of network
> events will cause the system to grind down to a near halt.
> The traces are still of debug value, so instead of removing them
> completely we fix it by changing the link state and node availability
> logging to debug traces.

Maybe some of these should be net_<level>_ratelimited(fmt, ...)

> diff --git a/net/tipc/link.c b/net/tipc/link.c
[]

> @@ -588,8 +588,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
>  				l_ptr->fsm_msg_cnt++;
>  				link_set_timer(l_ptr, cont_intv / 4);
>  			} else {	/* Link has failed */
> -				pr_warn("%s<%s>, peer not responding\n",
> -					link_rst_msg, l_ptr->name);

Like this one.

> +				pr_debug("%s<%s>, peer not responding\n",
> +					 link_rst_msg, l_ptr->name);
>  				tipc_link_reset(l_ptr);
>  				l_ptr->state = RESET_UNKNOWN;
>  				l_ptr->fsm_msg_cnt = 0;

> @@ -380,8 +380,8 @@ static void node_lost_contact(struct tipc_node *n_ptr)
>  	char addr_string[16];
>  	u32 i;
>  
> -	pr_info("Lost contact with %s\n",
> -		tipc_addr_string_fill(addr_string, n_ptr->addr));
> +	pr_debug("Lost contact with %s\n",
> +		 tipc_addr_string_fill(addr_string, n_ptr->addr));
>  
>  	/* Flush broadcast link info associated with lost node */
>  	if (n_ptr->bclink.recv_permitted) {

And maybe this one too.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH net-next 2/2] flow_dissector: add tipc support
  2015-01-22 16:10 ` [PATCH net-next 2/2] flow_dissector: add tipc support erik.hugne
@ 2015-01-22 17:29   ` Eric Dumazet
  2015-01-22 18:38     ` Jon Maloy
  2015-01-27  0:57     ` David Miller
  2015-01-27  1:04   ` David Miller
  1 sibling, 2 replies; 11+ messages in thread
From: Eric Dumazet @ 2015-01-22 17:29 UTC (permalink / raw)
  To: erik.hugne; +Cc: richard.alpe, netdev, jon.maloy, ying.xue, tipc-discussion

On Thu, 2015-01-22 at 17:10 +0100, erik.hugne@ericsson.com wrote:
> From: Erik Hugne <erik.hugne@ericsson.com>
> 
> The flows are hashed on the sending node address, which allows us
> to spread out the TIPC link processing to RPS enabled cores. There
> is no point to include the destination address in the hash as that
> will always be the same for all inbound links. We have experimented
> with a 3-tuple hash over [srcnode, sport, dport], but this showed to
> give slightly lower performance because of increased lock contention
> when the same link was handled by multiple cores.
> 
> Signed-off-by: Ying Xue <ying.xue@windriver.com>
> Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
> Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
> ---
>  net/core/flow_dissector.c | 14 ++++++++++++++
>  1 file changed, 14 insertions(+)
> 
> diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
> index 4508493..beb83d1 100644
> --- a/net/core/flow_dissector.c
> +++ b/net/core/flow_dissector.c
> @@ -178,6 +178,20 @@ ipv6:
>  			return false;
>  		}
>  	}
> +	case htons(ETH_P_TIPC): {
> +		struct {
> +			__be32 pre[3];
> +			__be32 srcnode;
> +		} *hdr, _hdr;

Is this header defined somewhere in an include file ?

This looks a bit ugly to locally define the format...

^ permalink raw reply	[flat|nested] 11+ messages in thread

* RE: [PATCH net-next 2/2] flow_dissector: add tipc support
  2015-01-22 17:29   ` Eric Dumazet
@ 2015-01-22 18:38     ` Jon Maloy
  2015-01-27  0:57     ` David Miller
  1 sibling, 0 replies; 11+ messages in thread
From: Jon Maloy @ 2015-01-22 18:38 UTC (permalink / raw)
  To: Eric Dumazet, Erik Hugne; +Cc: Richard Alpe, netdev, ying.xue, tipc-discussion



> -----Original Message-----
> From: Eric Dumazet [mailto:eric.dumazet@gmail.com]
> Sent: January-22-15 12:29 PM
> To: Erik Hugne
> Cc: Richard Alpe; netdev@vger.kernel.org; Jon Maloy;
> ying.xue@windriver.com; tipc-discussion@lists.sourceforge.net
> Subject: Re: [PATCH net-next 2/2] flow_dissector: add tipc support
> 
> On Thu, 2015-01-22 at 17:10 +0100, erik.hugne@ericsson.com wrote:
> > From: Erik Hugne <erik.hugne@ericsson.com>
> >
> > The flows are hashed on the sending node address, which allows us to
> > spread out the TIPC link processing to RPS enabled cores. There is no
> > point to include the destination address in the hash as that will
> > always be the same for all inbound links. We have experimented with a
> > 3-tuple hash over [srcnode, sport, dport], but this showed to give
> > slightly lower performance because of increased lock contention when
> > the same link was handled by multiple cores.
> >
> > Signed-off-by: Ying Xue <ying.xue@windriver.com>
> > Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
> > Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
> > ---
> >  net/core/flow_dissector.c | 14 ++++++++++++++
> >  1 file changed, 14 insertions(+)
> >
> > diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
> > index 4508493..beb83d1 100644
> > --- a/net/core/flow_dissector.c
> > +++ b/net/core/flow_dissector.c
> > @@ -178,6 +178,20 @@ ipv6:
> >  			return false;
> >  		}
> >  	}
> > +	case htons(ETH_P_TIPC): {
> > +		struct {
> > +			__be32 pre[3];
> > +			__be32 srcnode;
> > +		} *hdr, _hdr;
> 
> Is this header defined somewhere in an include file ?
> 
> This looks a bit ugly to locally define the format...

The header is defined in a file (msg.h) inside the TIPC 
module code, and cannot be included from here. It is
also a lot more detailed, and comes with a number of 
inline functions to manipulate the header. The point 
with doing it this way is that we don't want to expose
this header file outside the module code, when it isn't
strictly necessary.

Defining it locally is also a way of making it clear that the
content of word 3 of the header is the only thing that
matters in this context, and that we want to keep the
rest private.

Regards
///jon

> 
> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH net-next 1/2] tipc: fix excessive network event logging
  2015-01-22 17:24 ` [PATCH net-next 1/2] tipc: fix excessive network event logging Joe Perches
@ 2015-01-23  6:44   ` Erik Hugne
  0 siblings, 0 replies; 11+ messages in thread
From: Erik Hugne @ 2015-01-23  6:44 UTC (permalink / raw)
  To: Joe Perches; +Cc: richard.alpe, netdev, jon.maloy, ying.xue, tipc-discussion

On Thu, Jan 22, 2015 at 09:24:47AM -0800, Joe Perches wrote:
> On Thu, 2015-01-22 at 17:10 +0100, erik.hugne@ericsson.com wrote:
> > From: Erik Hugne <erik.hugne@ericsson.com>
> > 
> > If a large number of namespaces is spawned on a node and TIPC is
> > enabled in each of these, the excessive printk tracing of network
> > events will cause the system to grind down to a near halt.
> > The traces are still of debug value, so instead of removing them
> > completely we fix it by changing the link state and node availability
> > logging to debug traces.
> 
> Maybe some of these should be net_<level>_ratelimited(fmt, ...)

We proposed that initially, but changed all to pr_debug after David's comment:
http://www.spinics.net/lists/netdev/msg312902.html
The topology information (links going up/down) can be accessed
both via netlink ('tipc link list' command) and the topology server
via a subscription API.

//E

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH net-next 2/2] flow_dissector: add tipc support
  2015-01-22 17:29   ` Eric Dumazet
  2015-01-22 18:38     ` Jon Maloy
@ 2015-01-27  0:57     ` David Miller
  2015-01-27 12:08       ` Erik Hugne
  1 sibling, 1 reply; 11+ messages in thread
From: David Miller @ 2015-01-27  0:57 UTC (permalink / raw)
  To: eric.dumazet
  Cc: erik.hugne, richard.alpe, netdev, jon.maloy, ying.xue, tipc-discussion

From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Thu, 22 Jan 2015 09:29:11 -0800

> On Thu, 2015-01-22 at 17:10 +0100, erik.hugne@ericsson.com wrote:
>> From: Erik Hugne <erik.hugne@ericsson.com>
>> 
>> The flows are hashed on the sending node address, which allows us
>> to spread out the TIPC link processing to RPS enabled cores. There
>> is no point to include the destination address in the hash as that
>> will always be the same for all inbound links. We have experimented
>> with a 3-tuple hash over [srcnode, sport, dport], but this showed to
>> give slightly lower performance because of increased lock contention
>> when the same link was handled by multiple cores.
>> 
>> Signed-off-by: Ying Xue <ying.xue@windriver.com>
>> Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
>> Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
>> ---
>>  net/core/flow_dissector.c | 14 ++++++++++++++
>>  1 file changed, 14 insertions(+)
>> 
>> diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
>> index 4508493..beb83d1 100644
>> --- a/net/core/flow_dissector.c
>> +++ b/net/core/flow_dissector.c
>> @@ -178,6 +178,20 @@ ipv6:
>>  			return false;
>>  		}
>>  	}
>> +	case htons(ETH_P_TIPC): {
>> +		struct {
>> +			__be32 pre[3];
>> +			__be32 srcnode;
>> +		} *hdr, _hdr;
> 
> Is this header defined somewhere in an include file ?
> 
> This looks a bit ugly to locally define the format...

I'd like this situation improved but I plan to apply this as-is for
now.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH net-next 1/2] tipc: fix excessive network event logging
  2015-01-22 16:10 [PATCH net-next 1/2] tipc: fix excessive network event logging erik.hugne
  2015-01-22 16:10 ` [PATCH net-next 2/2] flow_dissector: add tipc support erik.hugne
  2015-01-22 17:24 ` [PATCH net-next 1/2] tipc: fix excessive network event logging Joe Perches
@ 2015-01-27  1:04 ` David Miller
  2 siblings, 0 replies; 11+ messages in thread
From: David Miller @ 2015-01-27  1:04 UTC (permalink / raw)
  To: erik.hugne; +Cc: richard.alpe, netdev, jon.maloy, ying.xue, tipc-discussion

From: <erik.hugne@ericsson.com>
Date: Thu, 22 Jan 2015 17:10:31 +0100

> From: Erik Hugne <erik.hugne@ericsson.com>
> 
> If a large number of namespaces is spawned on a node and TIPC is
> enabled in each of these, the excessive printk tracing of network
> events will cause the system to grind down to a near halt.
> The traces are still of debug value, so instead of removing them
> completely we fix it by changing the link state and node availability
> logging to debug traces.
> 
> Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>

Applied.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH net-next 2/2] flow_dissector: add tipc support
  2015-01-22 16:10 ` [PATCH net-next 2/2] flow_dissector: add tipc support erik.hugne
  2015-01-22 17:29   ` Eric Dumazet
@ 2015-01-27  1:04   ` David Miller
  1 sibling, 0 replies; 11+ messages in thread
From: David Miller @ 2015-01-27  1:04 UTC (permalink / raw)
  To: erik.hugne; +Cc: richard.alpe, netdev, jon.maloy, ying.xue, tipc-discussion

From: <erik.hugne@ericsson.com>
Date: Thu, 22 Jan 2015 17:10:32 +0100

> From: Erik Hugne <erik.hugne@ericsson.com>
> 
> The flows are hashed on the sending node address, which allows us
> to spread out the TIPC link processing to RPS enabled cores. There
> is no point to include the destination address in the hash as that
> will always be the same for all inbound links. We have experimented
> with a 3-tuple hash over [srcnode, sport, dport], but this showed to
> give slightly lower performance because of increased lock contention
> when the same link was handled by multiple cores.
> 
> Signed-off-by: Ying Xue <ying.xue@windriver.com>
> Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
> Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>

Applied.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH net-next 2/2] flow_dissector: add tipc support
  2015-01-27  0:57     ` David Miller
@ 2015-01-27 12:08       ` Erik Hugne
  2015-01-27 14:40         ` Eric Dumazet
  0 siblings, 1 reply; 11+ messages in thread
From: Erik Hugne @ 2015-01-27 12:08 UTC (permalink / raw)
  To: David Miller; +Cc: jon.maloy, eric.dumazet, netdev, tipc-discussion

On Mon, Jan 26, 2015 at 04:57:49PM -0800, David Miller wrote:
> From: Eric Dumazet <eric.dumazet@gmail.com>
> Date: Thu, 22 Jan 2015 09:29:11 -0800
> 
> > On Thu, 2015-01-22 at 17:10 +0100, erik.hugne@ericsson.com wrote:
> >> From: Erik Hugne <erik.hugne@ericsson.com>
> >> 
> >> The flows are hashed on the sending node address, which allows us
> >> to spread out the TIPC link processing to RPS enabled cores. There
> >> is no point to include the destination address in the hash as that
> >> will always be the same for all inbound links. We have experimented
> >> with a 3-tuple hash over [srcnode, sport, dport], but this showed to
> >> give slightly lower performance because of increased lock contention
> >> when the same link was handled by multiple cores.
> >> 
> >> Signed-off-by: Ying Xue <ying.xue@windriver.com>
> >> Signed-off-by: Erik Hugne <erik.hugne@ericsson.com>
> >> Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
> >> ---
> >>  net/core/flow_dissector.c | 14 ++++++++++++++
> >>  1 file changed, 14 insertions(+)
> >> 
> >> diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
> >> index 4508493..beb83d1 100644
> >> --- a/net/core/flow_dissector.c
> >> +++ b/net/core/flow_dissector.c
> >> @@ -178,6 +178,20 @@ ipv6:
> >>  			return false;
> >>  		}
> >>  	}
> >> +	case htons(ETH_P_TIPC): {
> >> +		struct {
> >> +			__be32 pre[3];
> >> +			__be32 srcnode;
> >> +		} *hdr, _hdr;
> > 
> > Is this header defined somewhere in an include file ?
> > 
> > This looks a bit ugly to locally define the format...
> 
> I'd like this situation improved but I plan to apply this as-is for
> now.

About time we do something about this.. I'll post a patch with proper header
definitions soon.

//E


------------------------------------------------------------------------------
Dive into the World of Parallel Programming. The Go Parallel Website,
sponsored by Intel and developed in partnership with Slashdot Media, is your
hub for all things parallel software development, from weekly thought
leadership blogs to news, videos, case studies, tutorials and more. Take a
look and join the conversation now. http://goparallel.sourceforge.net/

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH net-next 2/2] flow_dissector: add tipc support
  2015-01-27 12:08       ` Erik Hugne
@ 2015-01-27 14:40         ` Eric Dumazet
  0 siblings, 0 replies; 11+ messages in thread
From: Eric Dumazet @ 2015-01-27 14:40 UTC (permalink / raw)
  To: Erik Hugne
  Cc: David Miller, richard.alpe, netdev, jon.maloy, ying.xue, tipc-discussion

On Tue, 2015-01-27 at 13:08 +0100, Erik Hugne wrote:

> 
> About time we do something about this.. I'll post a patch with proper header
> definitions soon.

Thanks for following up ;)

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2015-01-27 14:40 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-01-22 16:10 [PATCH net-next 1/2] tipc: fix excessive network event logging erik.hugne
2015-01-22 16:10 ` [PATCH net-next 2/2] flow_dissector: add tipc support erik.hugne
2015-01-22 17:29   ` Eric Dumazet
2015-01-22 18:38     ` Jon Maloy
2015-01-27  0:57     ` David Miller
2015-01-27 12:08       ` Erik Hugne
2015-01-27 14:40         ` Eric Dumazet
2015-01-27  1:04   ` David Miller
2015-01-22 17:24 ` [PATCH net-next 1/2] tipc: fix excessive network event logging Joe Perches
2015-01-23  6:44   ` Erik Hugne
2015-01-27  1:04 ` David Miller

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.