All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
@ 2014-07-09  1:09 Mahesh Bandewar
  2014-07-09  7:10 ` Eric Dumazet
                   ` (3 more replies)
  0 siblings, 4 replies; 17+ messages in thread
From: Mahesh Bandewar @ 2014-07-09  1:09 UTC (permalink / raw)
  To: Jay Vosburgh, Veaceslav Falico, Andy Gospodarek, David Miller
  Cc: netdev, Mahesh Bandewar, Eric Dumazet, Maciej Zenczykowski

In TLB mode if tlb_dynamic_lb is NOT set, slaves from the bond
group are selected based on the hash distribution. This does not
exclude dead links which are part of the bond. Also if there is a
temporary link event which brings down the interface, packets
hashed on that interface would be dropped too.

This patch fixes these issues and distributes flows across the
UP links only. Also the array construction of links which are
capable of sending packets happen in the control path leaving
only link-selection duing the data-path.

One possible side effect of this is - at a link event; all
flows will be shuffled to get good distribution. But impact of
this should be minimum with the assumption that a member or
members of the bond group are not available is a very temporary
situation.

Signed-off-by: Mahesh Bandewar <maheshb@google.com>
---
 drivers/net/bonding/bond_alb.c | 52 +++++++++++++++++++++++++++++++++++++-----
 drivers/net/bonding/bond_alb.h | 11 +++++++++
 drivers/net/bonding/bonding.h  |  6 +++++
 3 files changed, 63 insertions(+), 6 deletions(-)

diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 76c0dade233f..1f39d41fde4b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -195,6 +195,9 @@ static int tlb_initialize(struct bonding *bond)
 
 	_unlock_tx_hashtbl_bh(bond);
 
+	/* Initialize the TLB array spin-lock */
+	spin_lock_init(&bond_info->slave_arr_lock);
+
 	return 0;
 }
 
@@ -209,6 +212,9 @@ static void tlb_deinitialize(struct bonding *bond)
 	bond_info->tx_hashtbl = NULL;
 
 	_unlock_tx_hashtbl_bh(bond);
+
+	if (bond_is_nondyn_tlb(bond) && bond_info->slave_arr)
+		kfree_rcu(bond_info->slave_arr, rcu);
 }
 
 static long long compute_gap(struct slave *slave)
@@ -1406,9 +1412,37 @@ out:
 	return NETDEV_TX_OK;
 }
 
+static int bond_tlb_update_slave_arr(struct bonding *bond)
+{
+	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+	struct slave *tx_slave;
+	struct list_head *iter;
+	struct tlb_up_slave *new_arr, *old_arr;
+
+	new_arr = kzalloc(offsetof(struct tlb_up_slave, arr[bond->slave_cnt]),
+			  GFP_KERNEL);
+	if (!new_arr)
+		return -ENOMEM;
+
+	bond_for_each_slave(bond, tx_slave, iter) {
+		if (bond_slave_can_tx(tx_slave))
+			new_arr->arr[new_arr->count++] = tx_slave;
+	}
+
+	spin_lock(&bond_info->slave_arr_lock);
+	old_arr = bond_info->slave_arr;
+	rcu_assign_pointer(bond_info->slave_arr, new_arr);
+	spin_unlock(&bond_info->slave_arr_lock);
+	if (old_arr)
+		kfree_rcu(old_arr, rcu);
+
+	return 0;
+}
+
 int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 	struct ethhdr *eth_data;
 	struct slave *tx_slave = NULL;
 	u32 hash_index;
@@ -1429,12 +1463,13 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 							      hash_index & 0xFF,
 							      skb->len);
 			} else {
-				struct list_head *iter;
-				int idx = hash_index % bond->slave_cnt;
-
-				bond_for_each_slave_rcu(bond, tx_slave, iter)
-					if (--idx < 0)
-						break;
+				struct tlb_up_slave *slaves;
+				rcu_read_lock();
+				slaves = rcu_dereference(bond_info->slave_arr);
+				if (slaves && slaves->count)
+					tx_slave = slaves->arr[hash_index %
+							       slaves->count];
+				rcu_read_unlock();
 			}
 			break;
 		}
@@ -1721,6 +1756,11 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
 			 */
 		}
 	}
+
+	if (bond_is_nondyn_tlb(bond)) {
+		if (bond_tlb_update_slave_arr(bond))
+			pr_err("Failed to build slave-array for TLB mode.\n");
+	}
 }
 
 /**
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 5fc76c01636c..731a8e830639 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -139,12 +139,23 @@ struct tlb_slave_info {
 			 */
 };
 
+struct tlb_up_slave {
+	unsigned int	count;
+	struct rcu_head rcu;
+	struct slave	*arr[0];
+};
+
 struct alb_bond_info {
 	struct tlb_client_info	*tx_hashtbl; /* Dynamically allocated */
 	spinlock_t		tx_hashtbl_lock;
 	u32			unbalanced_load;
 	int			tx_rebalance_counter;
 	int			lp_counter;
+	/* -------- non-dynamic tlb mode only ---------*/
+	struct tlb_up_slave __rcu *slave_arr;	  /* Up slaves */
+	spinlock_t		  slave_arr_lock; /* Lock to manage concurrent
+						   * writers
+						   */
 	/* -------- rlb parameters -------- */
 	int rlb_enabled;
 	struct rlb_client_info	*rx_hashtbl;	/* Receive hash table */
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 0b4d9cde0b05..ad9b3dce07d8 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -265,6 +265,12 @@ static inline bool bond_is_lb(const struct bonding *bond)
 	       BOND_MODE(bond) == BOND_MODE_ALB;
 }
 
+static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
+{
+	return (BOND_MODE(bond) == BOND_MODE_TLB)  &&
+	       (bond->params.tlb_dynamic_lb == 0);
+}
+
 static inline bool bond_mode_uses_arp(int mode)
 {
 	return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
-- 
2.0.0.526.g5318336

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09  1:09 [PATCH] bonding: Do not try to send packets over dead link in TLB mode Mahesh Bandewar
@ 2014-07-09  7:10 ` Eric Dumazet
       [not found]   ` <CAF2d9jh1jDL7NMZapGn_Ohy8Y2JzHrWaKA5gFgR0tU=_KydtPg@mail.gmail.com>
  2014-07-09 10:14 ` Nikolay Aleksandrov
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 17+ messages in thread
From: Eric Dumazet @ 2014-07-09  7:10 UTC (permalink / raw)
  To: Mahesh Bandewar
  Cc: Jay Vosburgh, Veaceslav Falico, Andy Gospodarek, David Miller,
	netdev, Eric Dumazet, Maciej Zenczykowski

On Tue, 2014-07-08 at 18:09 -0700, Mahesh Bandewar wrote:
> In TLB mode if tlb_dynamic_lb is NOT set, slaves from the bond
> group are selected based on the hash distribution. This does not
> exclude dead links which are part of the bond. Also if there is a
> temporary link event which brings down the interface, packets
> hashed on that interface would be dropped too.
> 
> This patch fixes these issues and distributes flows across the
> UP links only. Also the array construction of links which are
> capable of sending packets happen in the control path leaving
> only link-selection duing the data-path.

s/duing/during/

Seems a speed improvement as well for bonding of 8 slaves ;)

> 
> One possible side effect of this is - at a link event; all
> flows will be shuffled to get good distribution. But impact of
> this should be minimum with the assumption that a member or
> members of the bond group are not available is a very temporary
> situation.
> 
> Signed-off-by: Mahesh Bandewar <maheshb@google.com>
> ---
>  drivers/net/bonding/bond_alb.c | 52 +++++++++++++++++++++++++++++++++++++-----
>  drivers/net/bonding/bond_alb.h | 11 +++++++++
>  drivers/net/bonding/bonding.h  |  6 +++++
>  3 files changed, 63 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
> index 76c0dade233f..1f39d41fde4b 100644
> --- a/drivers/net/bonding/bond_alb.c
> +++ b/drivers/net/bonding/bond_alb.c
> @@ -195,6 +195,9 @@ static int tlb_initialize(struct bonding *bond)
>  
>  	_unlock_tx_hashtbl_bh(bond);
>  
> +	/* Initialize the TLB array spin-lock */
> +	spin_lock_init(&bond_info->slave_arr_lock);
> +
>  	return 0;
>  }
>  
> @@ -209,6 +212,9 @@ static void tlb_deinitialize(struct bonding *bond)
>  	bond_info->tx_hashtbl = NULL;
>  
>  	_unlock_tx_hashtbl_bh(bond);
> +
> +	if (bond_is_nondyn_tlb(bond) && bond_info->slave_arr)
> +		kfree_rcu(bond_info->slave_arr, rcu);

You could remove the first condition, as slave_arr being NULL or not is
enough to take the decision to call kfree_rcu()

I do not know if the "bond_is_nondyn_tlb(bond)" can change over the
time for a given bonding device, so feel uncomfortable with a possible
memleak here.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09  1:09 [PATCH] bonding: Do not try to send packets over dead link in TLB mode Mahesh Bandewar
  2014-07-09  7:10 ` Eric Dumazet
@ 2014-07-09 10:14 ` Nikolay Aleksandrov
  2014-07-09 10:24 ` Veaceslav Falico
  2014-07-10 13:39 ` Jay Vosburgh
  3 siblings, 0 replies; 17+ messages in thread
From: Nikolay Aleksandrov @ 2014-07-09 10:14 UTC (permalink / raw)
  To: Mahesh Bandewar, Jay Vosburgh, Veaceslav Falico, Andy Gospodarek,
	David Miller
  Cc: netdev, Eric Dumazet, Maciej Zenczykowski

On 07/09/2014 03:09 AM, Mahesh Bandewar wrote:
> In TLB mode if tlb_dynamic_lb is NOT set, slaves from the bond
> group are selected based on the hash distribution. This does not
> exclude dead links which are part of the bond. Also if there is a
> temporary link event which brings down the interface, packets
> hashed on that interface would be dropped too.
> 
> This patch fixes these issues and distributes flows across the
> UP links only. Also the array construction of links which are
> capable of sending packets happen in the control path leaving
> only link-selection duing the data-path.
> 
> One possible side effect of this is - at a link event; all
> flows will be shuffled to get good distribution. But impact of
> this should be minimum with the assumption that a member or
> members of the bond group are not available is a very temporary
> situation.
> 
> Signed-off-by: Mahesh Bandewar <maheshb@google.com>
> ---

Hi Mahesh,
I think this should be targeted at net-next (and it actually applies there). I
also think you may be able to drop the spinlock as the
bond_alb_handle_link_change() function and its callers are always run under
RTNL, now I'm not 100% sure about this, it would require a more thorough check
but an ASSERT_RTNL at the correct spot could be useful to make sure and convert
anything left, in any case I'm okay as it is now too, this is merely a suggestion.

A few small nitpicks below. I'll give it a spin and if anything pops up will get
back to you.

Thanks for doing this, I like it :-)

>  drivers/net/bonding/bond_alb.c | 52 +++++++++++++++++++++++++++++++++++++-----
>  drivers/net/bonding/bond_alb.h | 11 +++++++++
>  drivers/net/bonding/bonding.h  |  6 +++++
>  3 files changed, 63 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
> index 76c0dade233f..1f39d41fde4b 100644
> --- a/drivers/net/bonding/bond_alb.c
> +++ b/drivers/net/bonding/bond_alb.c
> @@ -195,6 +195,9 @@ static int tlb_initialize(struct bonding *bond)
>  
>  	_unlock_tx_hashtbl_bh(bond);
>  
> +	/* Initialize the TLB array spin-lock */
> +	spin_lock_init(&bond_info->slave_arr_lock);
> +
>  	return 0;
>  }
>  
> @@ -209,6 +212,9 @@ static void tlb_deinitialize(struct bonding *bond)
>  	bond_info->tx_hashtbl = NULL;
>  
>  	_unlock_tx_hashtbl_bh(bond);
> +
> +	if (bond_is_nondyn_tlb(bond) && bond_info->slave_arr)
> +		kfree_rcu(bond_info->slave_arr, rcu);
>  }
>  
>  static long long compute_gap(struct slave *slave)
> @@ -1406,9 +1412,37 @@ out:
>  	return NETDEV_TX_OK;
>  }
>  
> +static int bond_tlb_update_slave_arr(struct bonding *bond)
> +{
> +	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
> +	struct slave *tx_slave;
> +	struct list_head *iter;
> +	struct tlb_up_slave *new_arr, *old_arr;
> +
> +	new_arr = kzalloc(offsetof(struct tlb_up_slave, arr[bond->slave_cnt]),
> +			  GFP_KERNEL);
> +	if (!new_arr)
> +		return -ENOMEM;
> +
> +	bond_for_each_slave(bond, tx_slave, iter) {
> +		if (bond_slave_can_tx(tx_slave))
> +			new_arr->arr[new_arr->count++] = tx_slave;
> +	}
> +
> +	spin_lock(&bond_info->slave_arr_lock);
> +	old_arr = bond_info->slave_arr;
> +	rcu_assign_pointer(bond_info->slave_arr, new_arr);
> +	spin_unlock(&bond_info->slave_arr_lock);
> +	if (old_arr)
> +		kfree_rcu(old_arr, rcu);
> +
> +	return 0;
> +}
> +
>  int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
>  {
>  	struct bonding *bond = netdev_priv(bond_dev);
> +	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
>  	struct ethhdr *eth_data;
>  	struct slave *tx_slave = NULL;
>  	u32 hash_index;
> @@ -1429,12 +1463,13 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
>  							      hash_index & 0xFF,
>  							      skb->len);
>  			} else {
> -				struct list_head *iter;
> -				int idx = hash_index % bond->slave_cnt;
> -
> -				bond_for_each_slave_rcu(bond, tx_slave, iter)
> -					if (--idx < 0)
> -						break;
> +				struct tlb_up_slave *slaves;
Please leave a blank line between local variable definition and the code.

> +				rcu_read_lock();
All bonding xmit functions already run in rcu, so this is not really necessary.


> +				slaves = rcu_dereference(bond_info->slave_arr);
> +				if (slaves && slaves->count)
> +					tx_slave = slaves->arr[hash_index %
> +							       slaves->count];
> +				rcu_read_unlock();
>  			}
>  			break;
>  		}
> @@ -1721,6 +1756,11 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
>  			 */
>  		}
>  	}
> +
> +	if (bond_is_nondyn_tlb(bond)) {
> +		if (bond_tlb_update_slave_arr(bond))
> +			pr_err("Failed to build slave-array for TLB mode.\n");
> +	}
>  }
>  
>  /**
> diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
> index 5fc76c01636c..731a8e830639 100644
> --- a/drivers/net/bonding/bond_alb.h
> +++ b/drivers/net/bonding/bond_alb.h
> @@ -139,12 +139,23 @@ struct tlb_slave_info {
>  			 */
>  };
>  
> +struct tlb_up_slave {
> +	unsigned int	count;
> +	struct rcu_head rcu;
> +	struct slave	*arr[0];
> +};
> +
>  struct alb_bond_info {
>  	struct tlb_client_info	*tx_hashtbl; /* Dynamically allocated */
>  	spinlock_t		tx_hashtbl_lock;
>  	u32			unbalanced_load;
>  	int			tx_rebalance_counter;
>  	int			lp_counter;
> +	/* -------- non-dynamic tlb mode only ---------*/
> +	struct tlb_up_slave __rcu *slave_arr;	  /* Up slaves */
> +	spinlock_t		  slave_arr_lock; /* Lock to manage concurrent
> +						   * writers
> +						   */
>  	/* -------- rlb parameters -------- */
>  	int rlb_enabled;
>  	struct rlb_client_info	*rx_hashtbl;	/* Receive hash table */
> diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
> index 0b4d9cde0b05..ad9b3dce07d8 100644
> --- a/drivers/net/bonding/bonding.h
> +++ b/drivers/net/bonding/bonding.h
> @@ -265,6 +265,12 @@ static inline bool bond_is_lb(const struct bonding *bond)
>  	       BOND_MODE(bond) == BOND_MODE_ALB;
>  }
>  
> +static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
> +{
> +	return (BOND_MODE(bond) == BOND_MODE_TLB)  &&
> +	       (bond->params.tlb_dynamic_lb == 0);
> +}
> +
>  static inline bool bond_mode_uses_arp(int mode)
>  {
>  	return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
> 

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09  1:09 [PATCH] bonding: Do not try to send packets over dead link in TLB mode Mahesh Bandewar
  2014-07-09  7:10 ` Eric Dumazet
  2014-07-09 10:14 ` Nikolay Aleksandrov
@ 2014-07-09 10:24 ` Veaceslav Falico
  2014-07-09 10:25   ` Nikolay Aleksandrov
  2014-07-10 13:39 ` Jay Vosburgh
  3 siblings, 1 reply; 17+ messages in thread
From: Veaceslav Falico @ 2014-07-09 10:24 UTC (permalink / raw)
  To: Mahesh Bandewar
  Cc: Jay Vosburgh, Andy Gospodarek, David Miller, netdev,
	Eric Dumazet, Maciej Zenczykowski

On Tue, Jul 08, 2014 at 06:09:58PM -0700, Mahesh Bandewar wrote:
>In TLB mode if tlb_dynamic_lb is NOT set, slaves from the bond
>group are selected based on the hash distribution. This does not
>exclude dead links which are part of the bond. Also if there is a
>temporary link event which brings down the interface, packets
>hashed on that interface would be dropped too.
>
>This patch fixes these issues and distributes flows across the
>UP links only. Also the array construction of links which are
>capable of sending packets happen in the control path leaving
>only link-selection duing the data-path.
>
>One possible side effect of this is - at a link event; all
>flows will be shuffled to get good distribution. But impact of
>this should be minimum with the assumption that a member or
>members of the bond group are not available is a very temporary
>situation.

Good one, it indeed will speed up things/fix it.

Some comments:

I didn't see how you handle the case when a slave is removed (i.e.
released) from bonding.

>
>Signed-off-by: Mahesh Bandewar <maheshb@google.com>
...snip...
>+static int bond_tlb_update_slave_arr(struct bonding *bond)
>+{
>+	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
>+	struct slave *tx_slave;
>+	struct list_head *iter;
>+	struct tlb_up_slave *new_arr, *old_arr;
>+
>+	new_arr = kzalloc(offsetof(struct tlb_up_slave, arr[bond->slave_cnt]),
>+			  GFP_KERNEL);
>+	if (!new_arr)
>+		return -ENOMEM;
>+
>+	bond_for_each_slave(bond, tx_slave, iter) {
>+		if (bond_slave_can_tx(tx_slave))
>+			new_arr->arr[new_arr->count++] = tx_slave;
>+	}
>+
>+	spin_lock(&bond_info->slave_arr_lock);

I don't think you can re-enter bond_alb_handle_link_change(), as it's
protected either by rtnl or write-lock curr_active_slave.

>+	old_arr = bond_info->slave_arr;
>+	rcu_assign_pointer(bond_info->slave_arr, new_arr);
>+	spin_unlock(&bond_info->slave_arr_lock);
>+	if (old_arr)
>+		kfree_rcu(old_arr, rcu);
>+
>+	return 0;
>+}
...snip...

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09 10:24 ` Veaceslav Falico
@ 2014-07-09 10:25   ` Nikolay Aleksandrov
  2014-07-09 12:04     ` Veaceslav Falico
  2014-07-09 16:52     ` Mahesh Bandewar
  0 siblings, 2 replies; 17+ messages in thread
From: Nikolay Aleksandrov @ 2014-07-09 10:25 UTC (permalink / raw)
  To: Veaceslav Falico, Mahesh Bandewar
  Cc: Jay Vosburgh, Andy Gospodarek, David Miller, netdev,
	Eric Dumazet, Maciej Zenczykowski

On 07/09/2014 12:24 PM, Veaceslav Falico wrote:
> On Tue, Jul 08, 2014 at 06:09:58PM -0700, Mahesh Bandewar wrote:
>> In TLB mode if tlb_dynamic_lb is NOT set, slaves from the bond
>> group are selected based on the hash distribution. This does not
>> exclude dead links which are part of the bond. Also if there is a
>> temporary link event which brings down the interface, packets
>> hashed on that interface would be dropped too.
>>
>> This patch fixes these issues and distributes flows across the
>> UP links only. Also the array construction of links which are
>> capable of sending packets happen in the control path leaving
>> only link-selection duing the data-path.
>>
>> One possible side effect of this is - at a link event; all
>> flows will be shuffled to get good distribution. But impact of
>> this should be minimum with the assumption that a member or
>> members of the bond group are not available is a very temporary
>> situation.
> 
> Good one, it indeed will speed up things/fix it.
> 
> Some comments:
> 
> I didn't see how you handle the case when a slave is removed (i.e.
> released) from bonding.
> 
>>
>> Signed-off-by: Mahesh Bandewar <maheshb@google.com>
> ...snip...
>> +static int bond_tlb_update_slave_arr(struct bonding *bond)
>> +{
>> +    struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
>> +    struct slave *tx_slave;
>> +    struct list_head *iter;
>> +    struct tlb_up_slave *new_arr, *old_arr;
>> +
>> +    new_arr = kzalloc(offsetof(struct tlb_up_slave, arr[bond->slave_cnt]),
>> +              GFP_KERNEL);
>> +    if (!new_arr)
>> +        return -ENOMEM;
>> +
>> +    bond_for_each_slave(bond, tx_slave, iter) {
>> +        if (bond_slave_can_tx(tx_slave))
>> +            new_arr->arr[new_arr->count++] = tx_slave;
>> +    }
>> +
>> +    spin_lock(&bond_info->slave_arr_lock);
> 
> I don't think you can re-enter bond_alb_handle_link_change(), as it's
> protected either by rtnl or write-lock curr_active_slave.
> 
Actually a very good catch :-)
Maybe the allocation above should be done with GFP_ATOMIC.

>> +    old_arr = bond_info->slave_arr;
>> +    rcu_assign_pointer(bond_info->slave_arr, new_arr);
>> +    spin_unlock(&bond_info->slave_arr_lock);
>> +    if (old_arr)
>> +        kfree_rcu(old_arr, rcu);
>> +
>> +    return 0;
>> +}
> ...snip...
> -- 
> To unsubscribe from this list: send the line "unsubscribe netdev" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09 10:25   ` Nikolay Aleksandrov
@ 2014-07-09 12:04     ` Veaceslav Falico
  2014-07-09 13:17       ` Eric Dumazet
  2014-07-09 16:52     ` Mahesh Bandewar
  1 sibling, 1 reply; 17+ messages in thread
From: Veaceslav Falico @ 2014-07-09 12:04 UTC (permalink / raw)
  To: Nikolay Aleksandrov
  Cc: Mahesh Bandewar, Jay Vosburgh, Andy Gospodarek, David Miller,
	netdev, Eric Dumazet, Maciej Zenczykowski

On Wed, Jul 09, 2014 at 12:25:43PM +0200, Nikolay Aleksandrov wrote:
>On 07/09/2014 12:24 PM, Veaceslav Falico wrote:
>> On Tue, Jul 08, 2014 at 06:09:58PM -0700, Mahesh Bandewar wrote:
...snip...
>>> +    spin_lock(&bond_info->slave_arr_lock);
>>
>> I don't think you can re-enter bond_alb_handle_link_change(), as it's
>> protected either by rtnl or write-lock curr_active_slave.
>>
>Actually a very good catch :-)
>Maybe the allocation above should be done with GFP_ATOMIC.

For the record - it's indeed always under rtnl, so ASSERT_RTNL() (from your
other email) is a good idea.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09 12:04     ` Veaceslav Falico
@ 2014-07-09 13:17       ` Eric Dumazet
  2014-07-09 13:27         ` Veaceslav Falico
  0 siblings, 1 reply; 17+ messages in thread
From: Eric Dumazet @ 2014-07-09 13:17 UTC (permalink / raw)
  To: Veaceslav Falico
  Cc: Nikolay Aleksandrov, Mahesh Bandewar, Jay Vosburgh,
	Andy Gospodarek, David Miller, netdev, Eric Dumazet,
	Maciej Zenczykowski

On Wed, 2014-07-09 at 14:04 +0200, Veaceslav Falico wrote:
> On Wed, Jul 09, 2014 at 12:25:43PM +0200, Nikolay Aleksandrov wrote:
> >On 07/09/2014 12:24 PM, Veaceslav Falico wrote:
> >> On Tue, Jul 08, 2014 at 06:09:58PM -0700, Mahesh Bandewar wrote:
> ...snip...
> >>> +    spin_lock(&bond_info->slave_arr_lock);
> >>
> >> I don't think you can re-enter bond_alb_handle_link_change(), as it's
> >> protected either by rtnl or write-lock curr_active_slave.
> >>
> >Actually a very good catch :-)
> >Maybe the allocation above should be done with GFP_ATOMIC.
> 
> For the record - it's indeed always under rtnl, so ASSERT_RTNL() (from your
> other email) is a good idea.

Strange. I basically suggested the ASSERT_RTNL() to Mahesh few days ago
and he tried this. But the assert triggered with miimon, so Mahesh added
back the spinlock.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09 13:17       ` Eric Dumazet
@ 2014-07-09 13:27         ` Veaceslav Falico
  2014-07-09 17:15           ` Mahesh Bandewar
  0 siblings, 1 reply; 17+ messages in thread
From: Veaceslav Falico @ 2014-07-09 13:27 UTC (permalink / raw)
  To: Eric Dumazet
  Cc: Nikolay Aleksandrov, Mahesh Bandewar, Jay Vosburgh,
	Andy Gospodarek, David Miller, netdev, Eric Dumazet,
	Maciej Zenczykowski

On Wed, Jul 09, 2014 at 03:17:29PM +0200, Eric Dumazet wrote:
>On Wed, 2014-07-09 at 14:04 +0200, Veaceslav Falico wrote:
>> On Wed, Jul 09, 2014 at 12:25:43PM +0200, Nikolay Aleksandrov wrote:
>> >On 07/09/2014 12:24 PM, Veaceslav Falico wrote:
>> >> On Tue, Jul 08, 2014 at 06:09:58PM -0700, Mahesh Bandewar wrote:
>> ...snip...
>> >>> +    spin_lock(&bond_info->slave_arr_lock);
>> >>
>> >> I don't think you can re-enter bond_alb_handle_link_change(), as it's
>> >> protected either by rtnl or write-lock curr_active_slave.
>> >>
>> >Actually a very good catch :-)
>> >Maybe the allocation above should be done with GFP_ATOMIC.
>>
>> For the record - it's indeed always under rtnl, so ASSERT_RTNL() (from your
>> other email) is a good idea.
>
>Strange. I basically suggested the ASSERT_RTNL() to Mahesh few days ago
>and he tried this. But the assert triggered with miimon, so Mahesh added
>back the spinlock.

That's indeed strange... From the code:

2103                 if (!rtnl_trylock()) {
2104                         delay = 1;
2105                         should_notify_peers = false;
2106                         goto re_arm;
2107                 }
2108
2109                 bond_miimon_commit(bond);
2110
2111                 rtnl_unlock();  /* might sleep, hold no other locks */

And we can get there only through bond_miimon_commit(), as part of the
miimon.

Maybe you've hit the kmalloc(GFP_KERNEL) warning?

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09 10:25   ` Nikolay Aleksandrov
  2014-07-09 12:04     ` Veaceslav Falico
@ 2014-07-09 16:52     ` Mahesh Bandewar
  1 sibling, 0 replies; 17+ messages in thread
From: Mahesh Bandewar @ 2014-07-09 16:52 UTC (permalink / raw)
  To: Nikolay Aleksandrov
  Cc: Veaceslav Falico, Jay Vosburgh, Andy Gospodarek, David Miller,
	netdev, Eric Dumazet, Maciej Zenczykowski

On Wed, Jul 9, 2014 at 3:25 AM, Nikolay Aleksandrov <nikolay@redhat.com> wrote:
[...]
>>> +
>>> +    spin_lock(&bond_info->slave_arr_lock);
>>
>> I don't think you can re-enter bond_alb_handle_link_change(), as it's
>> protected either by rtnl or write-lock curr_active_slave.
>>
> Actually a very good catch :-)
> Maybe the allocation above should be done with GFP_ATOMIC.
>
Actually I did try relying on RTNL but looks like in miimon path the
RTNL is not held and RTNL_ASSERT failed. But I do see that
curr_active_slave lock is held but not sure if that is always the
case. So it does make sense to change the alloc to use GFP_ATOMIC in
this case.
[...]

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
       [not found]   ` <CAF2d9jh1jDL7NMZapGn_Ohy8Y2JzHrWaKA5gFgR0tU=_KydtPg@mail.gmail.com>
@ 2014-07-09 17:09     ` Eric Dumazet
  2014-07-09 17:21       ` Mahesh Bandewar
  0 siblings, 1 reply; 17+ messages in thread
From: Eric Dumazet @ 2014-07-09 17:09 UTC (permalink / raw)
  To: Mahesh Bandewar
  Cc: Jay Vosburgh, Veaceslav Falico, Andy Gospodarek, David Miller,
	netdev, Eric Dumazet, Maciej Zenczykowski

On Wed, 2014-07-09 at 09:34 -0700, Mahesh Bandewar wrote:
> 

Please make sure to not send HTML message on netdev Mahesh, your message
was not delivered to the list.
> 
> On Wed, Jul 9, 2014 at 12:10 AM, Eric Dumazet <eric.dumazet@gmail.com>
> wrote:
>         On Tue, 2014-07-08 at 18:09 -0700, Mahesh Bandewar wrote:
>         > In TLB mode if tlb_dynamic_lb is NOT set, slaves from the
>         bond
>         > group are selected based on the hash distribution. This does
>         not
>         > exclude dead links which are part of the bond. Also if there
>         is a
>         > temporary link event which brings down the interface,
>         packets
>         > hashed on that interface would be dropped too.
>         >
>         > This patch fixes these issues and distributes flows across
>         the
>         > UP links only. Also the array construction of links which
>         are
>         > capable of sending packets happen in the control path
>         leaving
>         > only link-selection duing the data-path.
>         
>         
>         s/duing/during/
>         
>         Seems a speed improvement as well for bonding of 8 slaves ;)
>          
>         
>         >
>         > One possible side effect of this is - at a link event; all
>         > flows will be shuffled to get good distribution. But impact
>         of
>         > this should be minimum with the assumption that a member or
>         > members of the bond group are not available is a very
>         temporary
>         > situation.
>         >
>         > Signed-off-by: Mahesh Bandewar <maheshb@google.com>
>         > ---
>         >  drivers/net/bonding/bond_alb.c | 52
>         +++++++++++++++++++++++++++++++++++++-----
>         >  drivers/net/bonding/bond_alb.h | 11 +++++++++
>         >  drivers/net/bonding/bonding.h  |  6 +++++
>         >  3 files changed, 63 insertions(+), 6 deletions(-)
>         >
>         > diff --git a/drivers/net/bonding/bond_alb.c
>         b/drivers/net/bonding/bond_alb.c
>         > index 76c0dade233f..1f39d41fde4b 100644
>         > --- a/drivers/net/bonding/bond_alb.c
>         > +++ b/drivers/net/bonding/bond_alb.c
>         > @@ -195,6 +195,9 @@ static int tlb_initialize(struct bonding
>         *bond)
>         >
>         >       _unlock_tx_hashtbl_bh(bond);
>         >
>         > +     /* Initialize the TLB array spin-lock */
>         > +     spin_lock_init(&bond_info->slave_arr_lock);
>         > +
>         >       return 0;
>         >  }
>         >
>         > @@ -209,6 +212,9 @@ static void tlb_deinitialize(struct
>         bonding *bond)
>         >       bond_info->tx_hashtbl = NULL;
>         >
>         >       _unlock_tx_hashtbl_bh(bond);
>         > +
>         > +     if (bond_is_nondyn_tlb(bond) && bond_info->slave_arr)
>         > +             kfree_rcu(bond_info->slave_arr, rcu);
>         
>         
>         You could remove the first condition, as slave_arr being NULL
>         or not is
>         enough to take the decision to call kfree_rcu()
>         
>         I do not know if a the "bond_is_nondyn_tlb(bond)" can change
>         over the
>         time for a given bonding device, so feel uncomfortable with a
>         possible
>         memleak here.
>         
> It can not change while the bond device is up. It checks for the
> bonding mode and the parameter tlb_dynamic_lb and none of them can be
> changed without bringing down the bond. 
> 
> 
> I wanted to add it as a safe-guard against trying to free this array
> in other mode if there are some random bytes present (which is
> unlikely but having an extra check to be foolproof is not going to hurt
> was the thinking behind it). 
> 
Well, field is guaranteed to be NULL at bonding init time, otherwise you
would crash at first kfree_rcu()

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09 13:27         ` Veaceslav Falico
@ 2014-07-09 17:15           ` Mahesh Bandewar
  2014-07-09 17:24             ` Mahesh Bandewar
  0 siblings, 1 reply; 17+ messages in thread
From: Mahesh Bandewar @ 2014-07-09 17:15 UTC (permalink / raw)
  To: Veaceslav Falico
  Cc: Eric Dumazet, Nikolay Aleksandrov, Jay Vosburgh, Andy Gospodarek,
	David Miller, netdev, Eric Dumazet, Maciej Zenczykowski

On Wed, Jul 9, 2014 at 6:27 AM, Veaceslav Falico <vfalico@redhat.com> wrote:
> On Wed, Jul 09, 2014 at 03:17:29PM +0200, Eric Dumazet wrote:
>>
>> On Wed, 2014-07-09 at 14:04 +0200, Veaceslav Falico wrote:
>>>
>>> On Wed, Jul 09, 2014 at 12:25:43PM +0200, Nikolay Aleksandrov wrote:
>>> >On 07/09/2014 12:24 PM, Veaceslav Falico wrote:
>>> >> On Tue, Jul 08, 2014 at 06:09:58PM -0700, Mahesh Bandewar wrote:
>>> ...snip...
>>> >>> +    spin_lock(&bond_info->slave_arr_lock);
>>> >>
>>> >> I don't think you can re-enter bond_alb_handle_link_change(), as it's
>>> >> protected either by rtnl or write-lock curr_active_slave.
>>> >>
>>> >Actually a very good catch :-)
>>> >Maybe the allocation above should be done with GFP_ATOMIC.
>>>
>>> For the record - it's indeed always under rtnl, so ASSERT_RTNL() (from
>>> your
>>> other email) is a good idea.
>>
>>
>> Strange. I basically suggested the ASSERT_RTNL() to Mahesh few days ago
>> and he tried this. But the assert triggered with miimon, so Mahesh added
>> back the spinlock.
>
>
> That's indeed strange... From the code:
>
> 2103                 if (!rtnl_trylock()) {
> 2104                         delay = 1;
> 2105                         should_notify_peers = false;
> 2106                         goto re_arm;
> 2107                 }
> 2108
> 2109                 bond_miimon_commit(bond);
> 2110
> 2111                 rtnl_unlock();  /* might sleep, hold no other locks */
>
> And we can get there only through bond_miimon_commit(), as part of the
> miimon.
>
> Maybe you've hit the kmalloc(GFP_KERNEL) warning?

Hmm, actually as Eric mentioned, I did try removing the spinlock to
just use RTNL, but the assert failed and it wasn't a "sleepable fn
called in atomic context" message (I assume that is what you are
suggesting with the kmalloc warning).

I managed to find the stack trace for that -

RTNL: assertion failed at drivers/net/bonding/bond_alb.c (1371)
CPU: 2 PID: 3571 Comm: kworker/u16:7 Not tainted 3.11.10-smp-DEV #91
Workqueue: bond0 bond_mii_monitor [bonding]
 ffff8801175e4800 ffff880113b53d38 ffffffff97f97cab 000000000000004d
 ffff8801175c6800 ffff880113b53d58 ffffffffc046a17c ffff8801175c6800
 ffff8801175e4800 ffff880113b53d88 ffffffffc045ef27 0000000000000000
Call Trace:
 [<ffffffff97f97cab>] dump_stack+0x46/0x58
 [<ffffffffc046a17c>] bond_alb_handle_link_change+0x16c/0x180 [bonding]
 [<ffffffffc045ef27>] bond_handle_link_change+0x57/0x80 [bonding]
 [<ffffffffc0462d79>] bond_mii_monitor+0x679/0x6e0 [bonding]
 [<ffffffff97a6a7c0>] process_one_work+0x140/0x3f0
 [<ffffffff97a6aef1>] worker_thread+0x121/0x370
 [<ffffffff97a6add0>] ? rescuer_thread+0x320/0x320
 [<ffffffff97a720a0>] kthread+0xc0/0xd0
 [<ffffffff97a71fe0>] ? flush_kthread_worker+0x80/0x80
 [<ffffffff97fa291c>] ret_from_fork+0x7c/0xb0
 [<ffffffff97a71fe0>] ? flush_kthread_worker+0x80/0x80

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09 17:09     ` Eric Dumazet
@ 2014-07-09 17:21       ` Mahesh Bandewar
  0 siblings, 0 replies; 17+ messages in thread
From: Mahesh Bandewar @ 2014-07-09 17:21 UTC (permalink / raw)
  To: Eric Dumazet
  Cc: Jay Vosburgh, Veaceslav Falico, Andy Gospodarek, David Miller,
	netdev, Eric Dumazet, Maciej Zenczykowski

On Wed, Jul 9, 2014 at 10:09 AM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> On Wed, 2014-07-09 at 09:34 -0700, Mahesh Bandewar wrote:
>>
>
> Please make sure to not send HTML message on netdev Mahesh, your message
> was not delivered to the list.
>>
Gmail seems to be having issues remembering my settings about the text
vs. html preference and many times in the flow, I forget to check if
the mode selected is correct just before sending the mail. :(

>> On Wed, Jul 9, 2014 at 12:10 AM, Eric Dumazet <eric.dumazet@gmail.com>
>> wrote:
>>         On Tue, 2014-07-08 at 18:09 -0700, Mahesh Bandewar wrote:
>>         > In TLB mode if tlb_dynamic_lb is NOT set, slaves from the
>>         bond
>>         > group are selected based on the hash distribution. This does
>>         not
>>         > exclude dead links which are part of the bond. Also if there
>>         is a
>>         > temporary link event which brings down the interface,
>>         packets
>>         > hashed on that interface would be dropped too.
>>         >
>>         > This patch fixes these issues and distributes flows across
>>         the
>>         > UP links only. Also the array construction of links which
>>         are
>>         > capable of sending packets happen in the control path
>>         leaving
>>         > only link-selection duing the data-path.
>>
>>
>>         s/duing/during/
>>
>>         Seems a speed improvement as well for bonding of 8 slaves ;)
>>
>>
>>         >
>>         > One possible side effect of this is - at a link event; all
>>         > flows will be shuffled to get good distribution. But impact
>>         of
>>         > this should be minimum with the assumption that a member or
>>         > members of the bond group are not available is a very
>>         temporary
>>         > situation.
>>         >
>>         > Signed-off-by: Mahesh Bandewar <maheshb@google.com>
>>         > ---
>>         >  drivers/net/bonding/bond_alb.c | 52
>>         +++++++++++++++++++++++++++++++++++++-----
>>         >  drivers/net/bonding/bond_alb.h | 11 +++++++++
>>         >  drivers/net/bonding/bonding.h  |  6 +++++
>>         >  3 files changed, 63 insertions(+), 6 deletions(-)
>>         >
>>         > diff --git a/drivers/net/bonding/bond_alb.c
>>         b/drivers/net/bonding/bond_alb.c
>>         > index 76c0dade233f..1f39d41fde4b 100644
>>         > --- a/drivers/net/bonding/bond_alb.c
>>         > +++ b/drivers/net/bonding/bond_alb.c
>>         > @@ -195,6 +195,9 @@ static int tlb_initialize(struct bonding
>>         *bond)
>>         >
>>         >       _unlock_tx_hashtbl_bh(bond);
>>         >
>>         > +     /* Initialize the TLB array spin-lock */
>>         > +     spin_lock_init(&bond_info->slave_arr_lock);
>>         > +
>>         >       return 0;
>>         >  }
>>         >
>>         > @@ -209,6 +212,9 @@ static void tlb_deinitialize(struct
>>         bonding *bond)
>>         >       bond_info->tx_hashtbl = NULL;
>>         >
>>         >       _unlock_tx_hashtbl_bh(bond);
>>         > +
>>         > +     if (bond_is_nondyn_tlb(bond) && bond_info->slave_arr)
>>         > +             kfree_rcu(bond_info->slave_arr, rcu);
>>
>>
>>         You could remove the first condition, as slave_arr being NULL
>>         or not is
>>         enough to take the decision to call kfree_rcu()
>>
>>         I do not know if a the "bond_is_nondyn_tlb(bond)" can change
>>         over the
>>         time for a given bonding device, so feel uncomfortable with a
>>         possible
>>         memleak here.
>>
>> It can not change while the bond device is up. It checks for the
>> bonding mode and the parameter tlb_dynamic_lb and none of them can be
>> changed without bringing down the bond.
>>
>>
>> I wanted to add it as a safe-guard against trying to free this array
>> in other mode if there are some random bytes present (which is
>> unlikely but having an extra check to full-proof is not going to hurt
>> was the thinking behind it).
>>
> Well, field is guaranteed to be NULL at bonding init time, otherwise you
> would crash at first kfree_rcu()
>
I can remove the check.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09 17:15           ` Mahesh Bandewar
@ 2014-07-09 17:24             ` Mahesh Bandewar
  2014-07-09 21:07               ` Nikolay Aleksandrov
  0 siblings, 1 reply; 17+ messages in thread
From: Mahesh Bandewar @ 2014-07-09 17:24 UTC (permalink / raw)
  To: Veaceslav Falico
  Cc: Eric Dumazet, Nikolay Aleksandrov, Jay Vosburgh, Andy Gospodarek,
	David Miller, netdev, Eric Dumazet, Maciej Zenczykowski

On Wed, Jul 9, 2014 at 10:15 AM, Mahesh Bandewar <maheshb@google.com> wrote:
> On Wed, Jul 9, 2014 at 6:27 AM, Veaceslav Falico <vfalico@redhat.com> wrote:
>> On Wed, Jul 09, 2014 at 03:17:29PM +0200, Eric Dumazet wrote:
>>>
>>> On Wed, 2014-07-09 at 14:04 +0200, Veaceslav Falico wrote:
>>>>
>>>> On Wed, Jul 09, 2014 at 12:25:43PM +0200, Nikolay Aleksandrov wrote:
>>>> >On 07/09/2014 12:24 PM, Veaceslav Falico wrote:
>>>> >> On Tue, Jul 08, 2014 at 06:09:58PM -0700, Mahesh Bandewar wrote:
>>>> ...snip...
>>>> >>> +    spin_lock(&bond_info->slave_arr_lock);
>>>> >>
>>>> >> I don't think you can re-enter bond_alb_handle_link_change(), as it's
>>>> >> protected either by rtnl or write-lock curr_active_slave.
>>>> >>
>>>> >Actually a very good catch :-)
>>>> >Maybe the allocation above should be done with GFP_ATOMIC.
>>>>
>>>> For the record - it's indeed always under rtnl, so ASSERT_RTNL() (from
>>>> your
>>>> other email) is a good idea.
>>>
>>>
>>> Strange. I basically suggested the ASSERT_RTNL() to Mahesh few days ago
>>> and he tried this. But the assert triggered with miimon, so Mahesh added
>>> back the spinlock.
>>
>>
>> That's indeed strange... From the code:
>>
>> 2103                 if (!rtnl_trylock()) {
>> 2104                         delay = 1;
>> 2105                         should_notify_peers = false;
>> 2106                         goto re_arm;
>> 2107                 }
>> 2108
>> 2109                 bond_miimon_commit(bond);
>> 2110
>> 2111                 rtnl_unlock();  /* might sleep, hold no other locks */
>>
>> And we can get there only through bond_miimon_commit(), as part of the
>> miimon.
>>
>> Maybe you've hit the kmalloc(GFP_KERNEL) warning?
>
> Hmm, actually as Eric mentioned, I did try removing the spinlock to
> just use RTNL but assert failed and it wasn't sleepable fn called in
> atomic context message (I assume that is what you are suggesting from
> kmalloc warning).
>
> I managed to find the stack trace for that -
>
> RTNL: assertion failed at drivers/net/bonding/bond_alb.c (1371)
> CPU: 2 PID: 3571 Comm: kworker/u16:7 Not tainted 3.11.10-smp-DEV #91
> Workqueue: bond0 bond_mii_monitor [bonding]
>  ffff8801175e4800 ffff880113b53d38 ffffffff97f97cab 000000000000004d
>  ffff8801175c6800 ffff880113b53d58 ffffffffc046a17c ffff8801175c6800
>  ffff8801175e4800 ffff880113b53d88 ffffffffc045ef27 0000000000000000
> Call Trace:
>  [<ffffffff97f97cab>] dump_stack+0x46/0x58
>  [<ffffffffc046a17c>] bond_alb_handle_link_change+0x16c/0x180 [bonding]
>  [<ffffffffc045ef27>] bond_handle_link_change+0x57/0x80 [bonding]
>  [<ffffffffc0462d79>] bond_mii_monitor+0x679/0x6e0 [bonding]
>  [<ffffffff97a6a7c0>] process_one_work+0x140/0x3f0
>  [<ffffffff97a6aef1>] worker_thread+0x121/0x370
>  [<ffffffff97a6add0>] ? rescuer_thread+0x320/0x320
>  [<ffffffff97a720a0>] kthread+0xc0/0xd0
>  [<ffffffff97a71fe0>] ? flush_kthread_worker+0x80/0x80
>  [<ffffffff97fa291c>] ret_from_fork+0x7c/0xb0
>  [<ffffffff97a71fe0>] ? flush_kthread_worker+0x80/0x80

Please note that I got this assert-fail on 3.10 kernel and haven't
verified if things have changed in mii_monitor context since then.

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09 17:24             ` Mahesh Bandewar
@ 2014-07-09 21:07               ` Nikolay Aleksandrov
  2014-07-10  8:25                 ` Maciej Żenczykowski
  0 siblings, 1 reply; 17+ messages in thread
From: Nikolay Aleksandrov @ 2014-07-09 21:07 UTC (permalink / raw)
  To: Mahesh Bandewar, Veaceslav Falico
  Cc: Eric Dumazet, Andy Gospodarek, David Miller, netdev,
	Eric Dumazet, Maciej Zenczykowski, Jay Vosburgh

On 07/09/2014 07:24 PM, Mahesh Bandewar wrote:
> On Wed, Jul 9, 2014 at 10:15 AM, Mahesh Bandewar <maheshb@google.com> wrote:
>> On Wed, Jul 9, 2014 at 6:27 AM, Veaceslav Falico <vfalico@redhat.com> wrote:
>>> On Wed, Jul 09, 2014 at 03:17:29PM +0200, Eric Dumazet wrote:
>>>>
>>>> On Wed, 2014-07-09 at 14:04 +0200, Veaceslav Falico wrote:
>>>>>
>>>>> On Wed, Jul 09, 2014 at 12:25:43PM +0200, Nikolay Aleksandrov wrote:
>>>>>> On 07/09/2014 12:24 PM, Veaceslav Falico wrote:
>>>>>>> On Tue, Jul 08, 2014 at 06:09:58PM -0700, Mahesh Bandewar wrote:
>>>>> ...snip...
>>>>>>>> +    spin_lock(&bond_info->slave_arr_lock);
>>>>>>>
>>>>>>> I don't think you can re-enter bond_alb_handle_link_change(), as it's
>>>>>>> protected either by rtnl or write-lock curr_active_slave.
>>>>>>>
>>>>>> Actually a very good catch :-)
>>>>>> Maybe the allocation above should be done with GFP_ATOMIC.
>>>>>
>>>>> For the record - it's indeed always under rtnl, so ASSERT_RTNL() (from
>>>>> your
>>>>> other email) is a good idea.
>>>>
>>>>
>>>> Strange. I basically suggested the ASSERT_RTNL() to Mahesh few days ago
>>>> and he tried this. But the assert triggered with miimon, so Mahesh added
>>>> back the spinlock.
>>>
>>>
>>> That's indeed strange... From the code:
>>>
>>> 2103                 if (!rtnl_trylock()) {
>>> 2104                         delay = 1;
>>> 2105                         should_notify_peers = false;
>>> 2106                         goto re_arm;
>>> 2107                 }
>>> 2108
>>> 2109                 bond_miimon_commit(bond);
>>> 2110
>>> 2111                 rtnl_unlock();  /* might sleep, hold no other locks */
>>>
>>> And we can get there only through bond_miimon_commit(), as part of the
>>> miimon.
>>>
>>> Maybe you've hit the kmalloc(GFP_KERNEL) warning?
>>
>> Hmm, actually as Eric mentioned, I did try removing the spinlock to
>> just use RTNL but assert failed and it wasn't sleepable fn called in
>> atomic context message (I assume that is what you are suggesting from
>> kmalloc warning).
>>
>> I managed to find the stack trace for that -
>>
>> RTNL: assertion failed at drivers/net/bonding/bond_alb.c (1371)
>> CPU: 2 PID: 3571 Comm: kworker/u16:7 Not tainted 3.11.10-smp-DEV #91
>> Workqueue: bond0 bond_mii_monitor [bonding]
>>  ffff8801175e4800 ffff880113b53d38 ffffffff97f97cab 000000000000004d
>>  ffff8801175c6800 ffff880113b53d58 ffffffffc046a17c ffff8801175c6800
>>  ffff8801175e4800 ffff880113b53d88 ffffffffc045ef27 0000000000000000
>> Call Trace:
>>  [<ffffffff97f97cab>] dump_stack+0x46/0x58
>>  [<ffffffffc046a17c>] bond_alb_handle_link_change+0x16c/0x180 [bonding]
>>  [<ffffffffc045ef27>] bond_handle_link_change+0x57/0x80 [bonding]
>>  [<ffffffffc0462d79>] bond_mii_monitor+0x679/0x6e0 [bonding]
>>  [<ffffffff97a6a7c0>] process_one_work+0x140/0x3f0
>>  [<ffffffff97a6aef1>] worker_thread+0x121/0x370
>>  [<ffffffff97a6add0>] ? rescuer_thread+0x320/0x320
>>  [<ffffffff97a720a0>] kthread+0xc0/0xd0
>>  [<ffffffff97a71fe0>] ? flush_kthread_worker+0x80/0x80
>>  [<ffffffff97fa291c>] ret_from_fork+0x7c/0xb0
>>  [<ffffffff97a71fe0>] ? flush_kthread_worker+0x80/0x80
> 
> Please note that I got this assert-fail on 3.10 kernel and haven't
> verified if things have changed in mii_monitor context since then.
> 
Okay, this is definitely it. Moreover I cannot find
"bond_handle_link_change" function in any bonding version, google search
also shows up empty.

Could you please test this patch with the tree that it is intended for ?
I'm pretty certain that there won't be any problem.

And one more thing, please update the CC list, Jay's email now is:
j.vosburgh@gmail.com
according to the MAINTAINERS file.

Thanks,
 Nik

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09 21:07               ` Nikolay Aleksandrov
@ 2014-07-10  8:25                 ` Maciej Żenczykowski
  0 siblings, 0 replies; 17+ messages in thread
From: Maciej Żenczykowski @ 2014-07-10  8:25 UTC (permalink / raw)
  To: Nikolay Aleksandrov
  Cc: Mahesh Bandewar, Veaceslav Falico, Eric Dumazet, Andy Gospodarek,
	David Miller, netdev, Eric Dumazet, Jay Vosburgh

>> Please note that I got this assert-fail on 3.10 kernel and haven't
>> verified if things have changed in mii_monitor context since then.
>>
> Okay, this is definitely it. Moreover I cannot find
> "bond_handle_link_change" function in any bonding version, google search
> also shows up empty.
>
> Could you please test this patch with the tree that it is intended for ?
> I'm pretty certain that there won't be any problem.
>
> And one more thing, please update the CC list, Jay's email now is:
> j.vosburgh@gmail.com
> according to the MAINTAINERS file.

Mahesh you're running into one of the 2 or 3 internal bonding mode
changes that haven't been upstreamed.
Some sort of lacp churn machine and faster/better link up/down/change
notification propagation are among them.
We really should clean those up and get them submitted.

You should probably do this development on upstream 3.15 ;-)

- Maciej

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-09  1:09 [PATCH] bonding: Do not try to send packets over dead link in TLB mode Mahesh Bandewar
                   ` (2 preceding siblings ...)
  2014-07-09 10:24 ` Veaceslav Falico
@ 2014-07-10 13:39 ` Jay Vosburgh
  2014-07-10 14:40   ` Mahesh Bandewar
  3 siblings, 1 reply; 17+ messages in thread
From: Jay Vosburgh @ 2014-07-10 13:39 UTC (permalink / raw)
  To: Mahesh Bandewar
  Cc: Veaceslav Falico, Andy Gospodarek, David Miller, netdev,
	Eric Dumazet, Maciej Zenczykowski

Mahesh Bandewar <maheshb@google.com> wrote:

>In TLB mode if tlb_dynamic_lb is NOT set, slaves from the bond
>group are selected based on the hash distribution. This does not
>exclude dead links which are part of the bond. Also if there is a
>temporary link event which brings down the interface, packets
>hashed on that interface would be dropped too.
>
>This patch fixes these issues and distributes flows across the
>UP links only. Also the array construction of links which are
>capable of sending packets happen in the control path leaving
>only link-selection duing the data-path.
>
>One possible side effect of this is - at a link event; all
>flows will be shuffled to get good distribution. But impact of
>this should be minimum with the assumption that a member or
>members of the bond group are not available is a very temporary
>situation.

	Why limit this to just TLB mode?  Other similar situations,
e.g., the TX slave selection in 802.3ad or balance-xor via
bond_xmit_slave_id, would presumably see a performance gain from using
an array lookup instead of traversing a linked list (even though those
modes do already skip slaves that are down).

	The 802.3ad case is a bit more complex, as an array would have
to include only slaves that are members of the active aggregator, but
the bond_xmit_slave_id case shouldn't be much more difficult than what
you've done below.

	-J

>Signed-off-by: Mahesh Bandewar <maheshb@google.com>
>---
> drivers/net/bonding/bond_alb.c | 52 +++++++++++++++++++++++++++++++++++++-----
> drivers/net/bonding/bond_alb.h | 11 +++++++++
> drivers/net/bonding/bonding.h  |  6 +++++
> 3 files changed, 63 insertions(+), 6 deletions(-)
>
>diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
>index 76c0dade233f..1f39d41fde4b 100644
>--- a/drivers/net/bonding/bond_alb.c
>+++ b/drivers/net/bonding/bond_alb.c
>@@ -195,6 +195,9 @@ static int tlb_initialize(struct bonding *bond)
> 
> 	_unlock_tx_hashtbl_bh(bond);
> 
>+	/* Initialize the TLB array spin-lock */
>+	spin_lock_init(&bond_info->slave_arr_lock);
>+
> 	return 0;
> }
> 
>@@ -209,6 +212,9 @@ static void tlb_deinitialize(struct bonding *bond)
> 	bond_info->tx_hashtbl = NULL;
> 
> 	_unlock_tx_hashtbl_bh(bond);
>+
>+	if (bond_is_nondyn_tlb(bond) && bond_info->slave_arr)
>+		kfree_rcu(bond_info->slave_arr, rcu);
> }
> 
> static long long compute_gap(struct slave *slave)
>@@ -1406,9 +1412,37 @@ out:
> 	return NETDEV_TX_OK;
> }
> 
>+static int bond_tlb_update_slave_arr(struct bonding *bond)
>+{
>+	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
>+	struct slave *tx_slave;
>+	struct list_head *iter;
>+	struct tlb_up_slave *new_arr, *old_arr;
>+
>+	new_arr = kzalloc(offsetof(struct tlb_up_slave, arr[bond->slave_cnt]),
>+			  GFP_KERNEL);
>+	if (!new_arr)
>+		return -ENOMEM;
>+
>+	bond_for_each_slave(bond, tx_slave, iter) {
>+		if (bond_slave_can_tx(tx_slave))
>+			new_arr->arr[new_arr->count++] = tx_slave;
>+	}
>+
>+	spin_lock(&bond_info->slave_arr_lock);
>+	old_arr = bond_info->slave_arr;
>+	rcu_assign_pointer(bond_info->slave_arr, new_arr);
>+	spin_unlock(&bond_info->slave_arr_lock);
>+	if (old_arr)
>+		kfree_rcu(old_arr, rcu);
>+
>+	return 0;
>+}
>+
> int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
> {
> 	struct bonding *bond = netdev_priv(bond_dev);
>+	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
> 	struct ethhdr *eth_data;
> 	struct slave *tx_slave = NULL;
> 	u32 hash_index;
>@@ -1429,12 +1463,13 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
> 							      hash_index & 0xFF,
> 							      skb->len);
> 			} else {
>-				struct list_head *iter;
>-				int idx = hash_index % bond->slave_cnt;
>-
>-				bond_for_each_slave_rcu(bond, tx_slave, iter)
>-					if (--idx < 0)
>-						break;
>+				struct tlb_up_slave *slaves;
>+				rcu_read_lock();
>+				slaves = rcu_dereference(bond_info->slave_arr);
>+				if (slaves && slaves->count)
>+					tx_slave = slaves->arr[hash_index %
>+							       slaves->count];
>+				rcu_read_unlock();
> 			}
> 			break;
> 		}
>@@ -1721,6 +1756,11 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
> 			 */
> 		}
> 	}
>+
>+	if (bond_is_nondyn_tlb(bond)) {
>+		if (bond_tlb_update_slave_arr(bond))
>+			pr_err("Failed to build slave-array for TLB mode.\n");
>+	}
> }
> 
> /**
>diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
>index 5fc76c01636c..731a8e830639 100644
>--- a/drivers/net/bonding/bond_alb.h
>+++ b/drivers/net/bonding/bond_alb.h
>@@ -139,12 +139,23 @@ struct tlb_slave_info {
> 			 */
> };
> 
>+struct tlb_up_slave {
>+	unsigned int	count;
>+	struct rcu_head rcu;
>+	struct slave	*arr[0];
>+};
>+
> struct alb_bond_info {
> 	struct tlb_client_info	*tx_hashtbl; /* Dynamically allocated */
> 	spinlock_t		tx_hashtbl_lock;
> 	u32			unbalanced_load;
> 	int			tx_rebalance_counter;
> 	int			lp_counter;
>+	/* -------- non-dynamic tlb mode only ---------*/
>+	struct tlb_up_slave __rcu *slave_arr;	  /* Up slaves */
>+	spinlock_t		  slave_arr_lock; /* Lock to manage concurrent
>+						   * writers
>+						   */
> 	/* -------- rlb parameters -------- */
> 	int rlb_enabled;
> 	struct rlb_client_info	*rx_hashtbl;	/* Receive hash table */
>diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
>index 0b4d9cde0b05..ad9b3dce07d8 100644
>--- a/drivers/net/bonding/bonding.h
>+++ b/drivers/net/bonding/bonding.h
>@@ -265,6 +265,12 @@ static inline bool bond_is_lb(const struct bonding *bond)
> 	       BOND_MODE(bond) == BOND_MODE_ALB;
> }
> 
>+static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
>+{
>+	return (BOND_MODE(bond) == BOND_MODE_TLB)  &&
>+	       (bond->params.tlb_dynamic_lb == 0);
>+}
>+
> static inline bool bond_mode_uses_arp(int mode)
> {
> 	return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
>-- 
>2.0.0.526.g5318336

---
	-Jay Vosburgh, jay.vosburgh@canonical.com

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH] bonding: Do not try to send packets over dead link in TLB mode.
  2014-07-10 13:39 ` Jay Vosburgh
@ 2014-07-10 14:40   ` Mahesh Bandewar
  0 siblings, 0 replies; 17+ messages in thread
From: Mahesh Bandewar @ 2014-07-10 14:40 UTC (permalink / raw)
  To: Jay Vosburgh
  Cc: Veaceslav Falico, Andy Gospodarek, David Miller, netdev,
	Eric Dumazet, Maciej Zenczykowski

On Thu, Jul 10, 2014 at 6:39 AM, Jay Vosburgh
<jay.vosburgh@canonical.com> wrote:
> Mahesh Bandewar <maheshb@google.com> wrote:
>
>>In TLB mode if tlb_dynamic_lb is NOT set, slaves from the bond
>>group are selected based on the hash distribution. This does not
>>exclude dead links which are part of the bond. Also if there is a
>>temporary link event which brings down the interface, packets
>>hashed on that interface would be dropped too.
>>
>>This patch fixes these issues and distributes flows across the
>>UP links only. Also the array construction of links which are
>>capable of sending packets happen in the control path leaving
>>only link-selection duing the data-path.
>>
>>One possible side effect of this is - at a link event; all
>>flows will be shuffled to get good distribution. But impact of
>>this should be minimum with the assumption that a member or
>>members of the bond group are not available is a very temporary
>>situation.
>
>         Why limit this to just TLB mode?  Other similar situations,
> e.g., the TX slave selection in 802.3ad or balance-xor via
> bond_xmit_slave_id, would presumably see a performance gain from using
> an array lookup instead of traversing a linked list (even though those
> modes do already skip slaves that are down).
>
>         The 802.3ad case is a bit more complex, as an array would have
> to include only slaves that are members of the active aggregator, but
> the bond_xmit_slave_id case shouldn't be much more difficult than what
> you've done below.
>
That's a good point. My original intent was to fix the issue with the
TLB mode. Eric pointed out the gains with the array approach and I started
thinking about gains in other modes too. So I'll fix the current patch
(remove the spinlock if possible and a few other minor nits) but will try
to cook another patch to address all modes later. So the TLB issue gets
addressed first.
>         -J
>
>>Signed-off-by: Mahesh Bandewar <maheshb@google.com>
>>---
>> drivers/net/bonding/bond_alb.c | 52 +++++++++++++++++++++++++++++++++++++-----
>> drivers/net/bonding/bond_alb.h | 11 +++++++++
>> drivers/net/bonding/bonding.h  |  6 +++++
>> 3 files changed, 63 insertions(+), 6 deletions(-)
>>
>>diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
>>index 76c0dade233f..1f39d41fde4b 100644
>>--- a/drivers/net/bonding/bond_alb.c
>>+++ b/drivers/net/bonding/bond_alb.c
>>@@ -195,6 +195,9 @@ static int tlb_initialize(struct bonding *bond)
>>
>>       _unlock_tx_hashtbl_bh(bond);
>>
>>+      /* Initialize the TLB array spin-lock */
>>+      spin_lock_init(&bond_info->slave_arr_lock);
>>+
>>       return 0;
>> }
>>
>>@@ -209,6 +212,9 @@ static void tlb_deinitialize(struct bonding *bond)
>>       bond_info->tx_hashtbl = NULL;
>>
>>       _unlock_tx_hashtbl_bh(bond);
>>+
>>+      if (bond_is_nondyn_tlb(bond) && bond_info->slave_arr)
>>+              kfree_rcu(bond_info->slave_arr, rcu);
>> }
>>
>> static long long compute_gap(struct slave *slave)
>>@@ -1406,9 +1412,37 @@ out:
>>       return NETDEV_TX_OK;
>> }
>>
>>+static int bond_tlb_update_slave_arr(struct bonding *bond)
>>+{
>>+      struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
>>+      struct slave *tx_slave;
>>+      struct list_head *iter;
>>+      struct tlb_up_slave *new_arr, *old_arr;
>>+
>>+      new_arr = kzalloc(offsetof(struct tlb_up_slave, arr[bond->slave_cnt]),
>>+                        GFP_KERNEL);
>>+      if (!new_arr)
>>+              return -ENOMEM;
>>+
>>+      bond_for_each_slave(bond, tx_slave, iter) {
>>+              if (bond_slave_can_tx(tx_slave))
>>+                      new_arr->arr[new_arr->count++] = tx_slave;
>>+      }
>>+
>>+      spin_lock(&bond_info->slave_arr_lock);
>>+      old_arr = bond_info->slave_arr;
>>+      rcu_assign_pointer(bond_info->slave_arr, new_arr);
>>+      spin_unlock(&bond_info->slave_arr_lock);
>>+      if (old_arr)
>>+              kfree_rcu(old_arr, rcu);
>>+
>>+      return 0;
>>+}
>>+
>> int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
>> {
>>       struct bonding *bond = netdev_priv(bond_dev);
>>+      struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
>>       struct ethhdr *eth_data;
>>       struct slave *tx_slave = NULL;
>>       u32 hash_index;
>>@@ -1429,12 +1463,13 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
>>                                                             hash_index & 0xFF,
>>                                                             skb->len);
>>                       } else {
>>-                              struct list_head *iter;
>>-                              int idx = hash_index % bond->slave_cnt;
>>-
>>-                              bond_for_each_slave_rcu(bond, tx_slave, iter)
>>-                                      if (--idx < 0)
>>-                                              break;
>>+                              struct tlb_up_slave *slaves;
>>+                              rcu_read_lock();
>>+                              slaves = rcu_dereference(bond_info->slave_arr);
>>+                              if (slaves && slaves->count)
>>+                                      tx_slave = slaves->arr[hash_index %
>>+                                                             slaves->count];
>>+                              rcu_read_unlock();
>>                       }
>>                       break;
>>               }
>>@@ -1721,6 +1756,11 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
>>                        */
>>               }
>>       }
>>+
>>+      if (bond_is_nondyn_tlb(bond)) {
>>+              if (bond_tlb_update_slave_arr(bond))
>>+                      pr_err("Failed to build slave-array for TLB mode.\n");
>>+      }
>> }
>>
>> /**
>>diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
>>index 5fc76c01636c..731a8e830639 100644
>>--- a/drivers/net/bonding/bond_alb.h
>>+++ b/drivers/net/bonding/bond_alb.h
>>@@ -139,12 +139,23 @@ struct tlb_slave_info {
>>                        */
>> };
>>
>>+struct tlb_up_slave {
>>+      unsigned int    count;
>>+      struct rcu_head rcu;
>>+      struct slave    *arr[0];
>>+};
>>+
>> struct alb_bond_info {
>>       struct tlb_client_info  *tx_hashtbl; /* Dynamically allocated */
>>       spinlock_t              tx_hashtbl_lock;
>>       u32                     unbalanced_load;
>>       int                     tx_rebalance_counter;
>>       int                     lp_counter;
>>+      /* -------- non-dynamic tlb mode only ---------*/
>>+      struct tlb_up_slave __rcu *slave_arr;     /* Up slaves */
>>+      spinlock_t                slave_arr_lock; /* Lock to manage concurrent
>>+                                                 * writers
>>+                                                 */
>>       /* -------- rlb parameters -------- */
>>       int rlb_enabled;
>>       struct rlb_client_info  *rx_hashtbl;    /* Receive hash table */
>>diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
>>index 0b4d9cde0b05..ad9b3dce07d8 100644
>>--- a/drivers/net/bonding/bonding.h
>>+++ b/drivers/net/bonding/bonding.h
>>@@ -265,6 +265,12 @@ static inline bool bond_is_lb(const struct bonding *bond)
>>              BOND_MODE(bond) == BOND_MODE_ALB;
>> }
>>
>>+static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
>>+{
>>+      return (BOND_MODE(bond) == BOND_MODE_TLB)  &&
>>+             (bond->params.tlb_dynamic_lb == 0);
>>+}
>>+
>> static inline bool bond_mode_uses_arp(int mode)
>> {
>>       return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
>>--
>>2.0.0.526.g5318336
>
> ---
>         -Jay Vosburgh, jay.vosburgh@canonical.com

^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2014-07-10 14:40 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-07-09  1:09 [PATCH] bonding: Do not try to send packets over dead link in TLB mode Mahesh Bandewar
2014-07-09  7:10 ` Eric Dumazet
     [not found]   ` <CAF2d9jh1jDL7NMZapGn_Ohy8Y2JzHrWaKA5gFgR0tU=_KydtPg@mail.gmail.com>
2014-07-09 17:09     ` Eric Dumazet
2014-07-09 17:21       ` Mahesh Bandewar
2014-07-09 10:14 ` Nikolay Aleksandrov
2014-07-09 10:24 ` Veaceslav Falico
2014-07-09 10:25   ` Nikolay Aleksandrov
2014-07-09 12:04     ` Veaceslav Falico
2014-07-09 13:17       ` Eric Dumazet
2014-07-09 13:27         ` Veaceslav Falico
2014-07-09 17:15           ` Mahesh Bandewar
2014-07-09 17:24             ` Mahesh Bandewar
2014-07-09 21:07               ` Nikolay Aleksandrov
2014-07-10  8:25                 ` Maciej Żenczykowski
2014-07-09 16:52     ` Mahesh Bandewar
2014-07-10 13:39 ` Jay Vosburgh
2014-07-10 14:40   ` Mahesh Bandewar

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.