All of lore.kernel.org
 help / color / mirror / Atom feed
* [Ocfs2-devel] [patch 1/6] ocfs2: o2hb: add negotiate timer
@ 2016-05-23 21:50 akpm at linux-foundation.org
  2016-05-24 22:35 ` Mark Fasheh
  0 siblings, 1 reply; 9+ messages in thread
From: akpm at linux-foundation.org @ 2016-05-23 21:50 UTC (permalink / raw)
  To: ocfs2-devel

From: Junxiao Bi <junxiao.bi@oracle.com>
Subject: ocfs2: o2hb: add negotiate timer

This series of patches fixes the issue that, when storage goes down, all
nodes fence themselves due to write timeout.

With this patch set, all nodes will keep going until storage comes back
online, except when one of the following issues happens, in which case all
nodes will fence themselves as before:

1. io error got
2. network between nodes down
3. nodes panic

This patch (of 6):

When storage goes down, all nodes will fence themselves due to write
timeout.  The negotiate timer is designed to avoid this; with it, a node
will wait until storage is up again.

Negotiate timer working in the following way:

1. The timer expires before the write timeout timer; its timeout is half
   of the write timeout now.  It is re-queued along with the write timeout
   timer.  If it expires, it will send a NEGO_TIMEOUT message to the master
   node (the node with the lowest node number).  This message does nothing
   but mark a bit in a bitmap recording which nodes are negotiating timeout
   on the master node.

2. If storage goes down, nodes will send this message to the master node;
   then, when the master node finds its bitmap includes all online nodes,
   it sends a NEGO_APPROVE message to all nodes one by one.  This message
   re-queues the write timeout timer and the negotiate timer.  Any node
   that doesn't receive this message, or that meets some issue when
   handling it, will be fenced.  If storage comes up at any time,
   o2hb_thread will run and re-queue all the timers; nothing will be
   affected by these two steps.

Signed-off-by: Junxiao Bi <junxiao.bi@oracle.com>
Reviewed-by: Ryan Ding <ryan.ding@oracle.com>
Cc: Gang He <ghe@suse.com>
Cc: rwxybh <rwxybh@126.com>
Cc: Mark Fasheh <mfasheh@suse.de>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Joseph Qi <joseph.qi@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 fs/ocfs2/cluster/heartbeat.c |   51 ++++++++++++++++++++++++++++++---
 1 file changed, 47 insertions(+), 4 deletions(-)

diff -puN fs/ocfs2/cluster/heartbeat.c~ocfs2-o2hb-add-negotiate-timer fs/ocfs2/cluster/heartbeat.c
--- a/fs/ocfs2/cluster/heartbeat.c~ocfs2-o2hb-add-negotiate-timer
+++ a/fs/ocfs2/cluster/heartbeat.c
@@ -272,6 +272,10 @@ struct o2hb_region {
 	struct delayed_work	hr_write_timeout_work;
 	unsigned long		hr_last_timeout_start;
 
+	/* negotiate timer, used to negotiate extending hb timeout. */
+	struct delayed_work	hr_nego_timeout_work;
+	unsigned long		hr_nego_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+
 	/* Used during o2hb_check_slot to hold a copy of the block
 	 * being checked because we temporarily have to zero out the
 	 * crc field. */
@@ -319,7 +323,7 @@ static void o2hb_write_timeout(struct wo
 	o2quo_disk_timeout();
 }
 
-static void o2hb_arm_write_timeout(struct o2hb_region *reg)
+static void o2hb_arm_timeout(struct o2hb_region *reg)
 {
 	/* Arm writeout only after thread reaches steady state */
 	if (atomic_read(&reg->hr_steady_iterations) != 0)
@@ -337,11 +341,49 @@ static void o2hb_arm_write_timeout(struc
 	reg->hr_last_timeout_start = jiffies;
 	schedule_delayed_work(&reg->hr_write_timeout_work,
 			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
+
+	cancel_delayed_work(&reg->hr_nego_timeout_work);
+	/* negotiate timeout must be less than write timeout. */
+	schedule_delayed_work(&reg->hr_nego_timeout_work,
+			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS)/2);
+	memset(reg->hr_nego_node_bitmap, 0, sizeof(reg->hr_nego_node_bitmap));
 }
 
-static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
+static void o2hb_disarm_timeout(struct o2hb_region *reg)
 {
 	cancel_delayed_work_sync(&reg->hr_write_timeout_work);
+	cancel_delayed_work_sync(&reg->hr_nego_timeout_work);
+}
+
+static void o2hb_nego_timeout(struct work_struct *work)
+{
+	unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+	int master_node;
+	struct o2hb_region *reg;
+
+	reg = container_of(work, struct o2hb_region, hr_nego_timeout_work.work);
+	o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
+	/* lowest node as master node to make negotiate decision. */
+	master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
+
+	if (master_node == o2nm_this_node()) {
+		set_bit(master_node, reg->hr_nego_node_bitmap);
+		if (memcmp(reg->hr_nego_node_bitmap, live_node_bitmap,
+				sizeof(reg->hr_nego_node_bitmap))) {
+			/* check negotiate bitmap every second to do timeout
+			 * approve decision.
+			 */
+			schedule_delayed_work(&reg->hr_nego_timeout_work,
+				msecs_to_jiffies(1000));
+
+			return;
+		}
+
+		/* approve negotiate timeout request. */
+	} else {
+		/* negotiate timeout with master node. */
+	}
+
 }
 
 static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
@@ -1032,7 +1074,7 @@ static int o2hb_do_disk_heartbeat(struct
 	/* Skip disarming the timeout if own slot has stale/bad data */
 	if (own_slot_ok) {
 		o2hb_set_quorum_device(reg);
-		o2hb_arm_write_timeout(reg);
+		o2hb_arm_timeout(reg);
 	}
 
 bail:
@@ -1114,7 +1156,7 @@ static int o2hb_thread(void *data)
 		}
 	}
 
-	o2hb_disarm_write_timeout(reg);
+	o2hb_disarm_timeout(reg);
 
 	/* unclean stop is only used in very bad situation */
 	for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
@@ -1762,6 +1804,7 @@ static ssize_t o2hb_region_dev_store(str
 	}
 
 	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
+	INIT_DELAYED_WORK(&reg->hr_nego_timeout_work, o2hb_nego_timeout);
 
 	/*
 	 * A node is considered live after it has beat LIVE_THRESHOLD
_

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [Ocfs2-devel] [patch 1/6] ocfs2: o2hb: add negotiate timer
  2016-05-23 21:50 [Ocfs2-devel] [patch 1/6] ocfs2: o2hb: add negotiate timer akpm at linux-foundation.org
@ 2016-05-24 22:35 ` Mark Fasheh
  2016-05-25  1:44   ` Junxiao Bi
  0 siblings, 1 reply; 9+ messages in thread
From: Mark Fasheh @ 2016-05-24 22:35 UTC (permalink / raw)
  To: ocfs2-devel

On Mon, May 23, 2016 at 02:50:28PM -0700, Andrew Morton wrote:
> From: Junxiao Bi <junxiao.bi@oracle.com>
> Subject: ocfs2: o2hb: add negotiate timer

Thank you for the well written patch description by the way.


> This series of patches is to fix the issue that when storage down, all
> nodes will fence self due to write timeout.
> 
> With this patch set, all nodes will keep going until storage back online,
> except if the following issue happens, then all nodes will do as before to
> fence self.
> 
> 1. io error got
> 2. network between nodes down
> 3. nodes panic
> 
> This patch (of 6):
> 
> When storage down, all nodes will fence self due to write timeout.  The
> negotiate timer is designed to avoid this, with it node will wait until
> storage up again.
> 
> Negotiate timer working in the following way:
> 
> 1. The timer expires before write timeout timer, its timeout is half
>    of write timeout now.  It is re-queued along with write timeout timer.
>    If expires, it will send NEGO_TIMEOUT message to master node(node with
>    lowest node number).  This message does nothing but marks a bit in a
>    bitmap recording which nodes are negotiating timeout on master node.

I went through the patch series, and generally feel that the code
is well written and straight forward. I have two issues regarding
how this operates. Otherwise, I like the general direction this
is taking.

The first is easy - we're updating the o2cb network protocol and
need to bump the protocol version otherwise a node that doesn't
speak these new messages could mount and even be selected as the
'master' without actually being able to participate in this scheme.


My other concern is whether the notion of 'lowest node' can
change if one comes online while the cluster is negotiating this
timeout. Obviously in the case where all the disks are unplugged
this couldn't happen because a new node couldn't begin to
heartbeat.

What about a situation where only some nodes are negotiating this
timeout? On the ones which have no disk access, lowest node
number still won't change since they can't read the new
heartbeats. On those with stable access though, can't this value
change? How does that effect this algorithm?

Thanks,
	--Mark

--
Mark Fasheh

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [Ocfs2-devel] [patch 1/6] ocfs2: o2hb: add negotiate timer
  2016-05-24 22:35 ` Mark Fasheh
@ 2016-05-25  1:44   ` Junxiao Bi
  2016-05-25 23:26     ` Mark Fasheh
  0 siblings, 1 reply; 9+ messages in thread
From: Junxiao Bi @ 2016-05-25  1:44 UTC (permalink / raw)
  To: ocfs2-devel

On 05/25/2016 06:35 AM, Mark Fasheh wrote:
> On Mon, May 23, 2016 at 02:50:28PM -0700, Andrew Morton wrote:
>> From: Junxiao Bi <junxiao.bi@oracle.com>
>> Subject: ocfs2: o2hb: add negotiate timer
> 
> Thank you for the well written patch description by the way.
> 
> 
>> This series of patches is to fix the issue that when storage down, all
>> nodes will fence self due to write timeout.
>>
>> With this patch set, all nodes will keep going until storage back online,
>> except if the following issue happens, then all nodes will do as before to
>> fence self.
>>
>> 1. io error got
>> 2. network between nodes down
>> 3. nodes panic
>>
>> This patch (of 6):
>>
>> When storage down, all nodes will fence self due to write timeout.  The
>> negotiate timer is designed to avoid this, with it node will wait until
>> storage up again.
>>
>> Negotiate timer working in the following way:
>>
>> 1. The timer expires before write timeout timer, its timeout is half
>>    of write timeout now.  It is re-queued along with write timeout timer.
>>    If expires, it will send NEGO_TIMEOUT message to master node(node with
>>    lowest node number).  This message does nothing but marks a bit in a
>>    bitmap recording which nodes are negotiating timeout on master node.
> 
> I went through the patch series, and generally feel that the code
> is well written and straight forward. I have two issues regarding
> how this operates. Otherwise, I like the general direction this
> is taking.
> 
> The first is easy - we're updating the o2cb network protocol and
> need to bump the protocol version otherwise a node that doesn't
> speak these new messages could mount and even be selected as the
> 'master' without actually being able to participate in this scheme.
Right. Will add this.
> 
> 
> My other concern is whether the notion of 'lowest node' can
> change if one comes online while the cluster is negotiating this
> timeout. Obviously in the case where all the disks are unplugged
> this couldn't happen because a new node couldn't begin to
> heartbeat.
Yes.
> 
> What about a situation where only some nodes are negotiating this
> timeout? On the ones which have no disk access, lowest node
> number still won't change since they can't read the new
> heartbeats. On those with stable access though, can't this value
> change? How does that effect this algorithm?
The lowest node can change for good nodes, but that doesn't affect the
algorithm.  Because only bad nodes send the NEGO_TIMEOUT message while good
nodes do not, the original lowest node will never receive NEGO_TIMEOUT
messages from all nodes, so it will not approve the timeout; in the end the
bad nodes will fence themselves and the good nodes will stay alive.

Thanks,
Junxiao.
> 
> Thanks,
> 	--Mark
> 
> --
> Mark Fasheh
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [Ocfs2-devel] [patch 1/6] ocfs2: o2hb: add negotiate timer
  2016-05-25  1:44   ` Junxiao Bi
@ 2016-05-25 23:26     ` Mark Fasheh
  0 siblings, 0 replies; 9+ messages in thread
From: Mark Fasheh @ 2016-05-25 23:26 UTC (permalink / raw)
  To: ocfs2-devel

On Wed, May 25, 2016 at 09:44:23AM +0800, Junxiao Bi wrote:
> On 05/25/2016 06:35 AM, Mark Fasheh wrote:
> > I went through the patch series, and generally feel that the code
> > is well written and straight forward. I have two issues regarding
> > how this operates. Otherwise, I like the general direction this
> > is taking.
> > 
> > The first is easy - we're updating the o2cb network protocol and
> > need to bump the protocol version otherwise a node that doesn't
> > speak these new messages could mount and even be selected as the
> > 'master' without actually being able to participate in this scheme.
> Right. Will add this.

Great, thanks!


> > 
> > 
> > My other concern is whether the notion of 'lowest node' can
> > change if one comes online while the cluster is negotiating this
> > timeout. Obviously in the case where all the disks are unplugged
> > this couldn't happen because a new node couldn't begin to
> > heartbeat.
> Yes.
> > 
> > What about a situation where only some nodes are negotiating this
> > timeout? On the ones which have no disk access, lowest node
> > number still won't change since they can't read the new
> > heartbeats. On those with stable access though, can't this value
> > change? How does that effect this algorithm?
> The lowest node can change for good nodes, but didn't affect the
> algorithm. Because only bad nodes sent NEGO_TIMEOUT message while good
> nodes not, so the original lowest node will never receive NEGO_TIMEOUT
> messages from all nodes, then it will not approve the timeout, at last
> bad nodes will fence self and good nodes keep alive.

Ok, in that case you can put:

Reviewed-by: Mark Fasheh <mfasheh@suse.de>

on the patches I've seen (this series). We don't want it to go upstream
until your patch to bump the protocol version though so please cc me on
that patch and the others.

Thanks,
	--Mark

--
Mark Fasheh

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [Ocfs2-devel] [PATCH 1/6] ocfs2: o2hb: add negotiate timer
  2016-01-21 23:42   ` Andrew Morton
@ 2016-01-22  3:23     ` Junxiao Bi
  0 siblings, 0 replies; 9+ messages in thread
From: Junxiao Bi @ 2016-01-22  3:23 UTC (permalink / raw)
  To: ocfs2-devel

Hi Andrew,

On 01/22/2016 07:42 AM, Andrew Morton wrote:
> On Wed, 20 Jan 2016 11:13:34 +0800 Junxiao Bi <junxiao.bi@oracle.com> wrote:
> 
>> When storage down, all nodes will fence self due to write timeout.
>> The negotiate timer is designed to avoid this, with it node will
>> wait until storage up again.
>>
>> Negotiate timer working in the following way:
>>
>> 1. The timer expires before write timeout timer, its timeout is half
>> of write timeout now. It is re-queued along with write timeout timer.
>> If expires, it will send NEGO_TIMEOUT message to master node(node with
>> lowest node number). This message does nothing but marks a bit in a
>> bitmap recording which nodes are negotiating timeout on master node.
>>
>> 2. If storage down, nodes will send this message to master node, then
>> when master node finds its bitmap including all online nodes, it sends
>> NEGO_APPROVL message to all nodes one by one, this message will re-queue
>> write timeout timer and negotiate timer.
>> For any node doesn't receive this message or meets some issue when
>> handling this message, it will be fenced.
>> If storage up at any time, o2hb_thread will run and re-queue all the
>> timer, nothing will be affected by these two steps.
>>
>> ...
>>
>> +static void o2hb_nego_timeout(struct work_struct *work)
>> +{
>> +	struct o2hb_region *reg =
>> +		container_of(work, struct o2hb_region,
>> +			     hr_nego_timeout_work.work);
> 
> It's better to just do
> 
> 	struct o2hb_region *reg;
> 
> 	reg = container_of(work, struct o2hb_region, hr_nego_timeout_work.work);
> 
> and avoid the weird 80-column tricks.
OK. Will update this in V2.

> 
>> +	unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
> 
> the bitmap.h interfaces might be nicer here.  Perhaps.  A little bit.
Will consider this in v2.

> 
>> +	int master_node;
>> +
>> +	o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
>> +	/* lowest node as master node to make negotiate decision. */
>> +	master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
>> +
>> +	if (master_node == o2nm_this_node()) {
>> +		set_bit(master_node, reg->hr_nego_node_bitmap);
>> +		if (memcmp(reg->hr_nego_node_bitmap, live_node_bitmap,
>> +				sizeof(reg->hr_nego_node_bitmap))) {
>> +			/* check negotiate bitmap every second to do timeout
>> +			 * approve decision.
>> +			 */
>> +			schedule_delayed_work(&reg->hr_nego_timeout_work,
>> +				msecs_to_jiffies(1000));
> 
> One second is long enough to unmount the fs (and to run `rmmod
> ocfs2'!).  Is there anything preventing the work from triggering in
> these situations?
Yes, this delayed work will be synced before the umount.

Thanks,
Junxiao.
> 
>> +
>> +			return;
>> +		}
>> +
>> +		/* approve negotiate timeout request. */
>> +	} else {
>> +		/* negotiate timeout with master node. */
>> +	}
>> +
>>  }
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [Ocfs2-devel] [PATCH 1/6] ocfs2: o2hb: add negotiate timer
  2016-01-22  0:56   ` Joseph Qi
@ 2016-01-22  3:19     ` Junxiao Bi
  0 siblings, 0 replies; 9+ messages in thread
From: Junxiao Bi @ 2016-01-22  3:19 UTC (permalink / raw)
  To: ocfs2-devel

Hi Joseph,

On 01/22/2016 08:56 AM, Joseph Qi wrote:
> Hi Junxiao,
> 
> On 2016/1/20 11:13, Junxiao Bi wrote:
>> When storage down, all nodes will fence self due to write timeout.
>> The negotiate timer is designed to avoid this, with it node will
>> wait until storage up again.
>>
>> Negotiate timer working in the following way:
>>
>> 1. The timer expires before write timeout timer, its timeout is half
>> of write timeout now. It is re-queued along with write timeout timer.
>> If expires, it will send NEGO_TIMEOUT message to master node(node with
>> lowest node number). This message does nothing but marks a bit in a
>> bitmap recording which nodes are negotiating timeout on master node.
>>
>> 2. If storage down, nodes will send this message to master node, then
>> when master node finds its bitmap including all online nodes, it sends
>> NEGO_APPROVL message to all nodes one by one, this message will re-queue
>> write timeout timer and negotiate timer.
>> For any node doesn't receive this message or meets some issue when
>> handling this message, it will be fenced.
>> If storage up at any time, o2hb_thread will run and re-queue all the
>> timer, nothing will be affected by these two steps.
>>
>> Signed-off-by: Junxiao Bi <junxiao.bi@oracle.com>
>> Reviewed-by: Ryan Ding <ryan.ding@oracle.com>
>> ---
>>  fs/ocfs2/cluster/heartbeat.c |   52 ++++++++++++++++++++++++++++++++++++++----
>>  1 file changed, 48 insertions(+), 4 deletions(-)
>>
>> diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
>> index a3cc6d2fc896..b601ee95de50 100644
>> --- a/fs/ocfs2/cluster/heartbeat.c
>> +++ b/fs/ocfs2/cluster/heartbeat.c
>> @@ -272,6 +272,10 @@ struct o2hb_region {
>>  	struct delayed_work	hr_write_timeout_work;
>>  	unsigned long		hr_last_timeout_start;
>>  
>> +	/* negotiate timer, used to negotiate extending hb timeout. */
>> +	struct delayed_work	hr_nego_timeout_work;
>> +	unsigned long		hr_nego_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
>> +
>>  	/* Used during o2hb_check_slot to hold a copy of the block
>>  	 * being checked because we temporarily have to zero out the
>>  	 * crc field. */
>> @@ -320,7 +324,7 @@ static void o2hb_write_timeout(struct work_struct *work)
>>  	o2quo_disk_timeout();
>>  }
>>  
>> -static void o2hb_arm_write_timeout(struct o2hb_region *reg)
>> +static void o2hb_arm_timeout(struct o2hb_region *reg)
>>  {
>>  	/* Arm writeout only after thread reaches steady state */
>>  	if (atomic_read(&reg->hr_steady_iterations) != 0)
>> @@ -338,11 +342,50 @@ static void o2hb_arm_write_timeout(struct o2hb_region *reg)
>>  	reg->hr_last_timeout_start = jiffies;
>>  	schedule_delayed_work(&reg->hr_write_timeout_work,
>>  			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
>> +
>> +	cancel_delayed_work(&reg->hr_nego_timeout_work);
>> +	/* negotiate timeout must be less than write timeout. */
>> +	schedule_delayed_work(&reg->hr_nego_timeout_work,
>> +			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS)/2);
>> +	memset(reg->hr_nego_node_bitmap, 0, sizeof(reg->hr_nego_node_bitmap));
>>  }
>>  
>> -static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
>> +static void o2hb_disarm_timeout(struct o2hb_region *reg)
>>  {
>>  	cancel_delayed_work_sync(&reg->hr_write_timeout_work);
>> +	cancel_delayed_work_sync(&reg->hr_nego_timeout_work);
>> +}
>> +
>> +static void o2hb_nego_timeout(struct work_struct *work)
>> +{
>> +	struct o2hb_region *reg =
>> +		container_of(work, struct o2hb_region,
>> +			     hr_nego_timeout_work.work);
>> +	unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
>> +	int master_node;
>> +
>> +	o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
>> +	/* lowest node as master node to make negotiate decision. */
>> +	master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
>> +
>> +	if (master_node == o2nm_this_node()) {
>> +		set_bit(master_node, reg->hr_nego_node_bitmap);
>> +		if (memcmp(reg->hr_nego_node_bitmap, live_node_bitmap,
>> +				sizeof(reg->hr_nego_node_bitmap))) {
> Should the access to hr_nego_node_bitmap be protected, for example,
> under o2hb_live_lock?
I didn't see need for this. This bitmap is used by negotiation master
node, every set op is ordered by o2net_wq. And master will check the bit
every second to find whether it's set.

Thanks,
Junxiao.
> 
> Thanks,
> Joseph
> 
>> +			/* check negotiate bitmap every second to do timeout
>> +			 * approve decision.
>> +			 */
>> +			schedule_delayed_work(&reg->hr_nego_timeout_work,
>> +				msecs_to_jiffies(1000));
>> +
>> +			return;
>> +		}
>> +
>> +		/* approve negotiate timeout request. */
>> +	} else {
>> +		/* negotiate timeout with master node. */
>> +	}
>> +
>>  }
>>  
>>  static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
>> @@ -1033,7 +1076,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
>>  	/* Skip disarming the timeout if own slot has stale/bad data */
>>  	if (own_slot_ok) {
>>  		o2hb_set_quorum_device(reg);
>> -		o2hb_arm_write_timeout(reg);
>> +		o2hb_arm_timeout(reg);
>>  	}
>>  
>>  bail:
>> @@ -1115,7 +1158,7 @@ static int o2hb_thread(void *data)
>>  		}
>>  	}
>>  
>> -	o2hb_disarm_write_timeout(reg);
>> +	o2hb_disarm_timeout(reg);
>>  
>>  	/* unclean stop is only used in very bad situation */
>>  	for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
>> @@ -1762,6 +1805,7 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
>>  	}
>>  
>>  	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
>> +	INIT_DELAYED_WORK(&reg->hr_nego_timeout_work, o2hb_nego_timeout);
>>  
>>  	/*
>>  	 * A node is considered live after it has beat LIVE_THRESHOLD
>>
> 
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [Ocfs2-devel] [PATCH 1/6] ocfs2: o2hb: add negotiate timer
  2016-01-20  3:13 ` [Ocfs2-devel] [PATCH 1/6] ocfs2: o2hb: add negotiate timer Junxiao Bi
  2016-01-21 23:42   ` Andrew Morton
@ 2016-01-22  0:56   ` Joseph Qi
  2016-01-22  3:19     ` Junxiao Bi
  1 sibling, 1 reply; 9+ messages in thread
From: Joseph Qi @ 2016-01-22  0:56 UTC (permalink / raw)
  To: ocfs2-devel

Hi Junxiao,

On 2016/1/20 11:13, Junxiao Bi wrote:
> When storage down, all nodes will fence self due to write timeout.
> The negotiate timer is designed to avoid this, with it node will
> wait until storage up again.
> 
> Negotiate timer working in the following way:
> 
> 1. The timer expires before write timeout timer, its timeout is half
> of write timeout now. It is re-queued along with write timeout timer.
> If expires, it will send NEGO_TIMEOUT message to master node(node with
> lowest node number). This message does nothing but marks a bit in a
> bitmap recording which nodes are negotiating timeout on master node.
> 
> 2. If storage down, nodes will send this message to master node, then
> when master node finds its bitmap including all online nodes, it sends
> NEGO_APPROVL message to all nodes one by one, this message will re-queue
> write timeout timer and negotiate timer.
> For any node doesn't receive this message or meets some issue when
> handling this message, it will be fenced.
> If storage up at any time, o2hb_thread will run and re-queue all the
> timer, nothing will be affected by these two steps.
> 
> Signed-off-by: Junxiao Bi <junxiao.bi@oracle.com>
> Reviewed-by: Ryan Ding <ryan.ding@oracle.com>
> ---
>  fs/ocfs2/cluster/heartbeat.c |   52 ++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 48 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
> index a3cc6d2fc896..b601ee95de50 100644
> --- a/fs/ocfs2/cluster/heartbeat.c
> +++ b/fs/ocfs2/cluster/heartbeat.c
> @@ -272,6 +272,10 @@ struct o2hb_region {
>  	struct delayed_work	hr_write_timeout_work;
>  	unsigned long		hr_last_timeout_start;
>  
> +	/* negotiate timer, used to negotiate extending hb timeout. */
> +	struct delayed_work	hr_nego_timeout_work;
> +	unsigned long		hr_nego_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
> +
>  	/* Used during o2hb_check_slot to hold a copy of the block
>  	 * being checked because we temporarily have to zero out the
>  	 * crc field. */
> @@ -320,7 +324,7 @@ static void o2hb_write_timeout(struct work_struct *work)
>  	o2quo_disk_timeout();
>  }
>  
> -static void o2hb_arm_write_timeout(struct o2hb_region *reg)
> +static void o2hb_arm_timeout(struct o2hb_region *reg)
>  {
>  	/* Arm writeout only after thread reaches steady state */
>  	if (atomic_read(&reg->hr_steady_iterations) != 0)
> @@ -338,11 +342,50 @@ static void o2hb_arm_write_timeout(struct o2hb_region *reg)
>  	reg->hr_last_timeout_start = jiffies;
>  	schedule_delayed_work(&reg->hr_write_timeout_work,
>  			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
> +
> +	cancel_delayed_work(&reg->hr_nego_timeout_work);
> +	/* negotiate timeout must be less than write timeout. */
> +	schedule_delayed_work(&reg->hr_nego_timeout_work,
> +			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS)/2);
> +	memset(reg->hr_nego_node_bitmap, 0, sizeof(reg->hr_nego_node_bitmap));
>  }
>  
> -static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
> +static void o2hb_disarm_timeout(struct o2hb_region *reg)
>  {
>  	cancel_delayed_work_sync(&reg->hr_write_timeout_work);
> +	cancel_delayed_work_sync(&reg->hr_nego_timeout_work);
> +}
> +
> +static void o2hb_nego_timeout(struct work_struct *work)
> +{
> +	struct o2hb_region *reg =
> +		container_of(work, struct o2hb_region,
> +			     hr_nego_timeout_work.work);
> +	unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
> +	int master_node;
> +
> +	o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
> +	/* lowest node as master node to make negotiate decision. */
> +	master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
> +
> +	if (master_node == o2nm_this_node()) {
> +		set_bit(master_node, reg->hr_nego_node_bitmap);
> +		if (memcmp(reg->hr_nego_node_bitmap, live_node_bitmap,
> +				sizeof(reg->hr_nego_node_bitmap))) {
Should the access to hr_nego_node_bitmap be protected, for example,
under o2hb_live_lock?

Thanks,
Joseph

> +			/* check negotiate bitmap every second to do timeout
> +			 * approve decision.
> +			 */
> +			schedule_delayed_work(&reg->hr_nego_timeout_work,
> +				msecs_to_jiffies(1000));
> +
> +			return;
> +		}
> +
> +		/* approve negotiate timeout request. */
> +	} else {
> +		/* negotiate timeout with master node. */
> +	}
> +
>  }
>  
>  static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
> @@ -1033,7 +1076,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
>  	/* Skip disarming the timeout if own slot has stale/bad data */
>  	if (own_slot_ok) {
>  		o2hb_set_quorum_device(reg);
> -		o2hb_arm_write_timeout(reg);
> +		o2hb_arm_timeout(reg);
>  	}
>  
>  bail:
> @@ -1115,7 +1158,7 @@ static int o2hb_thread(void *data)
>  		}
>  	}
>  
> -	o2hb_disarm_write_timeout(reg);
> +	o2hb_disarm_timeout(reg);
>  
>  	/* unclean stop is only used in very bad situation */
>  	for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
> @@ -1762,6 +1805,7 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
>  	}
>  
>  	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
> +	INIT_DELAYED_WORK(&reg->hr_nego_timeout_work, o2hb_nego_timeout);
>  
>  	/*
>  	 * A node is considered live after it has beat LIVE_THRESHOLD
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [Ocfs2-devel] [PATCH 1/6] ocfs2: o2hb: add negotiate timer
  2016-01-20  3:13 ` [Ocfs2-devel] [PATCH 1/6] ocfs2: o2hb: add negotiate timer Junxiao Bi
@ 2016-01-21 23:42   ` Andrew Morton
  2016-01-22  3:23     ` Junxiao Bi
  2016-01-22  0:56   ` Joseph Qi
  1 sibling, 1 reply; 9+ messages in thread
From: Andrew Morton @ 2016-01-21 23:42 UTC (permalink / raw)
  To: ocfs2-devel

On Wed, 20 Jan 2016 11:13:34 +0800 Junxiao Bi <junxiao.bi@oracle.com> wrote:

> When storage down, all nodes will fence self due to write timeout.
> The negotiate timer is designed to avoid this, with it node will
> wait until storage up again.
> 
> Negotiate timer working in the following way:
> 
> 1. The timer expires before write timeout timer, its timeout is half
> of write timeout now. It is re-queued along with write timeout timer.
> If expires, it will send NEGO_TIMEOUT message to master node(node with
> lowest node number). This message does nothing but marks a bit in a
> bitmap recording which nodes are negotiating timeout on master node.
> 
> 2. If storage down, nodes will send this message to master node, then
> when master node finds its bitmap including all online nodes, it sends
> NEGO_APPROVL message to all nodes one by one, this message will re-queue
> write timeout timer and negotiate timer.
> For any node doesn't receive this message or meets some issue when
> handling this message, it will be fenced.
> If storage up at any time, o2hb_thread will run and re-queue all the
> timer, nothing will be affected by these two steps.
> 
> ...
>
> +static void o2hb_nego_timeout(struct work_struct *work)
> +{
> +	struct o2hb_region *reg =
> +		container_of(work, struct o2hb_region,
> +			     hr_nego_timeout_work.work);

It's better to just do

	struct o2hb_region *reg;

	reg = container_of(work, struct o2hb_region, hr_nego_timeout_work.work);

and avoid the weird 80-column tricks.

> +	unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];

the bitmap.h interfaces might be nicer here.  Perhaps.  A little bit.

> +	int master_node;
> +
> +	o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
> +	/* lowest node as master node to make negotiate decision. */
> +	master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
> +
> +	if (master_node == o2nm_this_node()) {
> +		set_bit(master_node, reg->hr_nego_node_bitmap);
> +		if (memcmp(reg->hr_nego_node_bitmap, live_node_bitmap,
> +				sizeof(reg->hr_nego_node_bitmap))) {
> +			/* check negotiate bitmap every second to do timeout
> +			 * approve decision.
> +			 */
> +			schedule_delayed_work(&reg->hr_nego_timeout_work,
> +				msecs_to_jiffies(1000));

One second is long enough to unmount the fs (and to run `rmmod
ocfs2'!).  Is there anything preventing the work from triggering in
these situations?

> +
> +			return;
> +		}
> +
> +		/* approve negotiate timeout request. */
> +	} else {
> +		/* negotiate timeout with master node. */
> +	}
> +
>  }

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [Ocfs2-devel] [PATCH 1/6] ocfs2: o2hb: add negotiate timer
  2016-01-20  3:13 [Ocfs2-devel] ocfs2: o2hb: not fence self if storage down Junxiao Bi
@ 2016-01-20  3:13 ` Junxiao Bi
  2016-01-21 23:42   ` Andrew Morton
  2016-01-22  0:56   ` Joseph Qi
  0 siblings, 2 replies; 9+ messages in thread
From: Junxiao Bi @ 2016-01-20  3:13 UTC (permalink / raw)
  To: ocfs2-devel

When storage is down, all nodes will fence themselves due to write timeout.
The negotiate timer is designed to avoid this; with it, each node will
wait until storage is up again.

The negotiate timer works in the following way:

1. The timer expires before the write timeout timer; its timeout is half
of the write timeout now. It is re-queued along with the write timeout
timer. If it expires, it will send a NEGO_TIMEOUT message to the master
node (the node with the lowest node number). This message does nothing but
mark a bit in a bitmap recording which nodes are negotiating timeout on
the master node.

2. If storage is down, nodes will send this message to the master node;
then, when the master node finds its bitmap includes all online nodes, it
sends a NEGO_APPROVE message to all nodes one by one. This message will
re-queue the write timeout timer and the negotiate timer.
Any node that does not receive this message, or that meets some issue
while handling it, will be fenced.
If storage comes up at any time, o2hb_thread will run and re-queue all
the timers; nothing will be affected by these two steps.

Signed-off-by: Junxiao Bi <junxiao.bi@oracle.com>
Reviewed-by: Ryan Ding <ryan.ding@oracle.com>
---
 fs/ocfs2/cluster/heartbeat.c |   52 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 48 insertions(+), 4 deletions(-)

diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index a3cc6d2fc896..b601ee95de50 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -272,6 +272,10 @@ struct o2hb_region {
 	struct delayed_work	hr_write_timeout_work;
 	unsigned long		hr_last_timeout_start;
 
+	/* negotiate timer, used to negotiate extending hb timeout. */
+	struct delayed_work	hr_nego_timeout_work;
+	unsigned long		hr_nego_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+
 	/* Used during o2hb_check_slot to hold a copy of the block
 	 * being checked because we temporarily have to zero out the
 	 * crc field. */
@@ -320,7 +324,7 @@ static void o2hb_write_timeout(struct work_struct *work)
 	o2quo_disk_timeout();
 }
 
-static void o2hb_arm_write_timeout(struct o2hb_region *reg)
+static void o2hb_arm_timeout(struct o2hb_region *reg)
 {
 	/* Arm writeout only after thread reaches steady state */
 	if (atomic_read(&reg->hr_steady_iterations) != 0)
@@ -338,11 +342,50 @@ static void o2hb_arm_write_timeout(struct o2hb_region *reg)
 	reg->hr_last_timeout_start = jiffies;
 	schedule_delayed_work(&reg->hr_write_timeout_work,
 			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
+
+	cancel_delayed_work(&reg->hr_nego_timeout_work);
+	/* negotiate timeout must be less than write timeout. */
+	schedule_delayed_work(&reg->hr_nego_timeout_work,
+			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS)/2);
+	memset(reg->hr_nego_node_bitmap, 0, sizeof(reg->hr_nego_node_bitmap));
 }
 
-static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
+static void o2hb_disarm_timeout(struct o2hb_region *reg)
 {
 	cancel_delayed_work_sync(&reg->hr_write_timeout_work);
+	cancel_delayed_work_sync(&reg->hr_nego_timeout_work);
+}
+
+static void o2hb_nego_timeout(struct work_struct *work)
+{
+	struct o2hb_region *reg =
+		container_of(work, struct o2hb_region,
+			     hr_nego_timeout_work.work);
+	unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+	int master_node;
+
+	o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
+	/* lowest node as master node to make negotiate decision. */
+	master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
+
+	if (master_node == o2nm_this_node()) {
+		set_bit(master_node, reg->hr_nego_node_bitmap);
+		if (memcmp(reg->hr_nego_node_bitmap, live_node_bitmap,
+				sizeof(reg->hr_nego_node_bitmap))) {
+			/* check negotiate bitmap every second to do timeout
+			 * approve decision.
+			 */
+			schedule_delayed_work(&reg->hr_nego_timeout_work,
+				msecs_to_jiffies(1000));
+
+			return;
+		}
+
+		/* approve negotiate timeout request. */
+	} else {
+		/* negotiate timeout with master node. */
+	}
+
 }
 
 static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
@@ -1033,7 +1076,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
 	/* Skip disarming the timeout if own slot has stale/bad data */
 	if (own_slot_ok) {
 		o2hb_set_quorum_device(reg);
-		o2hb_arm_write_timeout(reg);
+		o2hb_arm_timeout(reg);
 	}
 
 bail:
@@ -1115,7 +1158,7 @@ static int o2hb_thread(void *data)
 		}
 	}
 
-	o2hb_disarm_write_timeout(reg);
+	o2hb_disarm_timeout(reg);
 
 	/* unclean stop is only used in very bad situation */
 	for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
@@ -1762,6 +1805,7 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
 	}
 
 	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
+	INIT_DELAYED_WORK(&reg->hr_nego_timeout_work, o2hb_nego_timeout);
 
 	/*
 	 * A node is considered live after it has beat LIVE_THRESHOLD
-- 
1.7.9.5

^ permalink raw reply related	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2016-05-25 23:26 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-05-23 21:50 [Ocfs2-devel] [patch 1/6] ocfs2: o2hb: add negotiate timer akpm at linux-foundation.org
2016-05-24 22:35 ` Mark Fasheh
2016-05-25  1:44   ` Junxiao Bi
2016-05-25 23:26     ` Mark Fasheh
  -- strict thread matches above, loose matches on Subject: below --
2016-01-20  3:13 [Ocfs2-devel] ocfs2: o2hb: not fence self if storage down Junxiao Bi
2016-01-20  3:13 ` [Ocfs2-devel] [PATCH 1/6] ocfs2: o2hb: add negotiate timer Junxiao Bi
2016-01-21 23:42   ` Andrew Morton
2016-01-22  3:23     ` Junxiao Bi
2016-01-22  0:56   ` Joseph Qi
2016-01-22  3:19     ` Junxiao Bi

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.