* [PATCH v3] ceph: try to reconnect to the export targets
@ 2021-08-18  1:31 xiubli
  2021-08-18 12:31 ` Jeff Layton
  0 siblings, 1 reply; 3+ messages in thread
From: xiubli @ 2021-08-18  1:31 UTC (permalink / raw)
  To: jlayton; +Cc: idryomov, pdonnell, ceph-devel, Xiubo Li

From: Xiubo Li <xiubli@redhat.com>

If the exporting MDS crashes just after the EImportStart journal is
flushed, then when a standby MDS takes over and replays the
EImportStart journal, it will wait for the client to reconnect, but
the client may not have registered/opened a session with it yet.

Try to reconnect to such MDSes if they are listed in the export
targets and are in the RECONNECT state.

Signed-off-by: Xiubo Li <xiubli@redhat.com>
---

V3:
- switch to bitmap and on the stack
- put the ceph_put_mds_session() out of the mdsc->mutex lock scope
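
Regarding the second point: the session is pinned with a reference,
mdsc->mutex is dropped around the blocking call, the reference is put,
and only then is the mutex retaken. A simplified, annotated sketch of
the loop body (not the literal patch):

	s = __ceph_lookup_mds_session(mdsc, i);	/* returns a referenced session */
	...
	mutex_unlock(&mdsc->mutex);	/* can't hold it across a blocking call */
	send_mds_reconnect(mdsc, s);	/* may block while sending */
	ceph_put_mds_session(s);	/* put the ref outside the mutex */
	mutex_lock(&mdsc->mutex);	/* retake before the next iteration */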


 fs/ceph/mds_client.c | 55 +++++++++++++++++++++++++++++++++++++++++++-
 fs/ceph/mdsmap.c     | 10 +++++---
 2 files changed, 61 insertions(+), 4 deletions(-)

diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index e49dbeb6c06f..c2fca06b09a0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -11,6 +11,7 @@
 #include <linux/ratelimit.h>
 #include <linux/bits.h>
 #include <linux/ktime.h>
+#include <linux/bitmap.h>
 
 #include "super.h"
 #include "mds_client.h"
@@ -4197,13 +4198,19 @@ static void check_new_map(struct ceph_mds_client *mdsc,
 			  struct ceph_mdsmap *newmap,
 			  struct ceph_mdsmap *oldmap)
 {
-	int i;
+	int i, err;
 	int oldstate, newstate;
 	struct ceph_mds_session *s;
+	unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, BITS_PER_LONG)] = {0};
 
 	dout("check_new_map new %u old %u\n",
 	     newmap->m_epoch, oldmap->m_epoch);
 
+	if (newmap->m_info) {
+		for (i = 0; i < newmap->m_info->num_export_targets; i++)
+			set_bit(newmap->m_info->export_targets[i], targets);
+	}
+
 	for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
 		if (!mdsc->sessions[i])
 			continue;
@@ -4257,6 +4264,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
 		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
 		    newstate >= CEPH_MDS_STATE_RECONNECT) {
 			mutex_unlock(&mdsc->mutex);
+			clear_bit(i, targets);
 			send_mds_reconnect(mdsc, s);
 			mutex_lock(&mdsc->mutex);
 		}
@@ -4279,6 +4287,51 @@ static void check_new_map(struct ceph_mds_client *mdsc,
 		}
 	}
 
+	/*
+	 * Only open and reconnect sessions that don't exist yet.
+	 */
+	for (i = 0; i < newmap->possible_max_rank; i++) {
+		/*
+		 * If the importing MDS crashed just after the
+		 * EImportStart journal was flushed, then when a
+		 * standby MDS takes over and replays the
+		 * EImportStart journal, it will wait for the
+		 * client to reconnect, but the client may not
+		 * have registered/opened a session with it yet.
+		 *
+		 * Try to reconnect to that MDS daemon if its
+		 * rank is in the export targets bitmap and it
+		 * is in the up:reconnect state.
+		 */
+		newstate = ceph_mdsmap_get_state(newmap, i);
+		if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
+			continue;
+
+		/*
+		 * In rare cases the session may already have been
+		 * registered and opened by a request that picked a
+		 * random MDS during the mdsc->mutex unlock/lock
+		 * gap below. But that MDS daemon will just queue
+		 * such requests and keep waiting for the client's
+		 * reconnection request in the up:reconnect state.
+		 */
+		s = __ceph_lookup_mds_session(mdsc, i);
+		if (likely(!s)) {
+			s = __open_export_target_session(mdsc, i);
+			if (IS_ERR(s)) {
+				err = PTR_ERR(s);
+				pr_err("failed to open export target session, err %d\n",
+				       err);
+				continue;
+			}
+		}
+		dout("send reconnect to export target mds.%d\n", i);
+		mutex_unlock(&mdsc->mutex);
+		send_mds_reconnect(mdsc, s);
+		ceph_put_mds_session(s);
+		mutex_lock(&mdsc->mutex);
+	}
+
 	for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
 		s = mdsc->sessions[i];
 		if (!s)
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 3c444b9cb17b..d995cb02d30c 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -122,6 +122,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
 	int err;
 	u8 mdsmap_v;
 	u16 mdsmap_ev;
+	u32 target;
 
 	m = kzalloc(sizeof(*m), GFP_NOFS);
 	if (!m)
@@ -260,9 +261,12 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
 						       sizeof(u32), GFP_NOFS);
 			if (!info->export_targets)
 				goto nomem;
-			for (j = 0; j < num_export_targets; j++)
-				info->export_targets[j] =
-				       ceph_decode_32(&pexport_targets);
+			for (j = 0; j < num_export_targets; j++) {
+				target = ceph_decode_32(&pexport_targets);
+				if (target >= m->possible_max_rank)
+					goto corrupt;
+				info->export_targets[j] = target;
+			}
 		} else {
 			info->export_targets = NULL;
 		}
-- 
2.27.0



* Re: [PATCH v3] ceph: try to reconnect to the export targets
  2021-08-18  1:31 [PATCH v3] ceph: try to reconnect to the export targets xiubli
@ 2021-08-18 12:31 ` Jeff Layton
  2021-08-18 12:53   ` Xiubo Li
  0 siblings, 1 reply; 3+ messages in thread
From: Jeff Layton @ 2021-08-18 12:31 UTC (permalink / raw)
  To: xiubli; +Cc: idryomov, pdonnell, ceph-devel

On Wed, 2021-08-18 at 09:31 +0800, xiubli@redhat.com wrote:
> From: Xiubo Li <xiubli@redhat.com>
> 
> If the exporting MDS crashes just after the EImportStart journal is
> flushed, then when a standby MDS takes over and replays the
> EImportStart journal, it will wait for the client to reconnect, but
> the client may not have registered/opened a session with it yet.
> 
> Try to reconnect to such MDSes if they are listed in the export
> targets and are in the RECONNECT state.
> 
> Signed-off-by: Xiubo Li <xiubli@redhat.com>
> ---
> 
> V3:
> - switch to bitmap and on the stack
> - put the ceph_put_mds_session() out of the mdsc->mutex lock scope
> 
> 
>  fs/ceph/mds_client.c | 55 +++++++++++++++++++++++++++++++++++++++++++-
>  fs/ceph/mdsmap.c     | 10 +++++---
>  2 files changed, 61 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
> index e49dbeb6c06f..c2fca06b09a0 100644
> --- a/fs/ceph/mds_client.c
> +++ b/fs/ceph/mds_client.c
> @@ -11,6 +11,7 @@
>  #include <linux/ratelimit.h>
>  #include <linux/bits.h>
>  #include <linux/ktime.h>
> +#include <linux/bitmap.h>
>  
>  #include "super.h"
>  #include "mds_client.h"
> @@ -4197,13 +4198,19 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>  			  struct ceph_mdsmap *newmap,
>  			  struct ceph_mdsmap *oldmap)
>  {
> -	int i;
> +	int i, err;
>  	int oldstate, newstate;
>  	struct ceph_mds_session *s;
> +	unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, BITS_PER_LONG)] = {0};
>  
>  	dout("check_new_map new %u old %u\n",
>  	     newmap->m_epoch, oldmap->m_epoch);
>  
> +	if (newmap->m_info) {
> +		for (i = 0; i < newmap->m_info->num_export_targets; i++)
> +			set_bit(newmap->m_info->export_targets[i], targets);
> +	}
> +

I wasn't aware you could exceed the size of the first unsigned long in
the array with the atomic bitops handlers. Looking at the helpers
themselves though, I don't see why this wouldn't work. Ok!
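
For anyone else wondering: the bitops helpers treat the whole array as
one contiguous bit space, so bit nr lands in word nr / BITS_PER_LONG
at offset nr % BITS_PER_LONG, exactly as the kernel's BIT_WORD() and
BIT_MASK() macros compute. A minimal userspace sketch of that indexing
(the sk_* names are purely illustrative; the kernel's set_bit() is
additionally atomic):

	#include <limits.h>

	#define SK_BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

	/* word index plus bit offset, spanning as many longs as needed */
	static void sk_set_bit(unsigned long nr, unsigned long *addr)
	{
		addr[nr / SK_BITS_PER_LONG] |= 1UL << (nr % SK_BITS_PER_LONG);
	}

	static int sk_test_bit(unsigned long nr, const unsigned long *addr)
	{
		return (addr[nr / SK_BITS_PER_LONG] >> (nr % SK_BITS_PER_LONG)) & 1;
	}

That's also why sizing the on-stack array as
DIV_ROUND_UP(CEPH_MAX_MDS, BITS_PER_LONG) longs (or equivalently
DECLARE_BITMAP(targets, CEPH_MAX_MDS)) covers all CEPH_MAX_MDS bits.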

>  	for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
>  		if (!mdsc->sessions[i])
>  			continue;
> @@ -4257,6 +4264,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>  		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
>  		    newstate >= CEPH_MDS_STATE_RECONNECT) {
>  			mutex_unlock(&mdsc->mutex);
> +			clear_bit(i, targets);
>  			send_mds_reconnect(mdsc, s);
>  			mutex_lock(&mdsc->mutex);
>  		}
> @@ -4279,6 +4287,51 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>  		}
>  	}
>  
> +	/*
> +	 * Only open and reconnect sessions that don't exist yet.
> +	 */
> +	for (i = 0; i < newmap->possible_max_rank; i++) {
> +		/*
> +		 * If the importing MDS crashed just after the
> +		 * EImportStart journal was flushed, then when a
> +		 * standby MDS takes over and replays the
> +		 * EImportStart journal, it will wait for the
> +		 * client to reconnect, but the client may not
> +		 * have registered/opened a session with it yet.
> +		 *
> +		 * Try to reconnect to that MDS daemon if its
> +		 * rank is in the export targets bitmap and it
> +		 * is in the up:reconnect state.
> +		 */
> +		newstate = ceph_mdsmap_get_state(newmap, i);
> +		if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
> +			continue;
> +
> +		/*
> +		 * In rare cases the session may already have been
> +		 * registered and opened by a request that picked a
> +		 * random MDS during the mdsc->mutex unlock/lock
> +		 * gap below. But that MDS daemon will just queue
> +		 * such requests and keep waiting for the client's
> +		 * reconnection request in the up:reconnect state.
> +		 */
> +		s = __ceph_lookup_mds_session(mdsc, i);
> +		if (likely(!s)) {
> +			s = __open_export_target_session(mdsc, i);
> +			if (IS_ERR(s)) {
> +				err = PTR_ERR(s);
> +				pr_err("failed to open export target session, err %d\n",
> +				       err);
> +				continue;
> +			}
> +		}
> +		dout("send reconnect to export target mds.%d\n", i);
> +		mutex_unlock(&mdsc->mutex);
> +		send_mds_reconnect(mdsc, s);
> +		ceph_put_mds_session(s);
> +		mutex_lock(&mdsc->mutex);
> +	}
> +
>  	for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
>  		s = mdsc->sessions[i];
>  		if (!s)
> diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
> index 3c444b9cb17b..d995cb02d30c 100644
> --- a/fs/ceph/mdsmap.c
> +++ b/fs/ceph/mdsmap.c
> @@ -122,6 +122,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
>  	int err;
>  	u8 mdsmap_v;
>  	u16 mdsmap_ev;
> +	u32 target;
>  
>  	m = kzalloc(sizeof(*m), GFP_NOFS);
>  	if (!m)
> @@ -260,9 +261,12 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
>  						       sizeof(u32), GFP_NOFS);
>  			if (!info->export_targets)
>  				goto nomem;
> -			for (j = 0; j < num_export_targets; j++)
> -				info->export_targets[j] =
> -				       ceph_decode_32(&pexport_targets);
> +			for (j = 0; j < num_export_targets; j++) {
> +				target = ceph_decode_32(&pexport_targets);
> +				if (target >= m->possible_max_rank)
> +					goto corrupt;
> +				info->export_targets[j] = target;
> +			}
>  		} else {
>  			info->export_targets = NULL;
>  		}

Looks good. Merged into testing. I also reworded the changelog for
(hopefully) better clarity. Xiubo, let me know if I didn't get the
description right.

Thanks!
-- 
Jeff Layton <jlayton@kernel.org>



* Re: [PATCH v3] ceph: try to reconnect to the export targets
  2021-08-18 12:31 ` Jeff Layton
@ 2021-08-18 12:53   ` Xiubo Li
  0 siblings, 0 replies; 3+ messages in thread
From: Xiubo Li @ 2021-08-18 12:53 UTC (permalink / raw)
  To: Jeff Layton; +Cc: idryomov, pdonnell, ceph-devel


On 8/18/21 8:31 PM, Jeff Layton wrote:
> On Wed, 2021-08-18 at 09:31 +0800, xiubli@redhat.com wrote:
>> From: Xiubo Li <xiubli@redhat.com>
>>
>> If the exporting MDS crashes just after the EImportStart journal is
>> flushed, then when a standby MDS takes over and replays the
>> EImportStart journal, it will wait for the client to reconnect, but
>> the client may not have registered/opened a session with it yet.
>>
>> Try to reconnect to such MDSes if they are listed in the export
>> targets and are in the RECONNECT state.
>>
>> Signed-off-by: Xiubo Li <xiubli@redhat.com>
>> ---
>>
>> V3:
>> - switch to bitmap and on the stack
>> - put the ceph_put_mds_session() out of the mdsc->mutex lock scope
>>
>>
>>   fs/ceph/mds_client.c | 55 +++++++++++++++++++++++++++++++++++++++++++-
>>   fs/ceph/mdsmap.c     | 10 +++++---
>>   2 files changed, 61 insertions(+), 4 deletions(-)
>>
>> diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
>> index e49dbeb6c06f..c2fca06b09a0 100644
>> --- a/fs/ceph/mds_client.c
>> +++ b/fs/ceph/mds_client.c
>> @@ -11,6 +11,7 @@
>>   #include <linux/ratelimit.h>
>>   #include <linux/bits.h>
>>   #include <linux/ktime.h>
>> +#include <linux/bitmap.h>
>>   
>>   #include "super.h"
>>   #include "mds_client.h"
>> @@ -4197,13 +4198,19 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>>   			  struct ceph_mdsmap *newmap,
>>   			  struct ceph_mdsmap *oldmap)
>>   {
>> -	int i;
>> +	int i, err;
>>   	int oldstate, newstate;
>>   	struct ceph_mds_session *s;
>> +	unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, BITS_PER_LONG)] = {0};
>>   
>>   	dout("check_new_map new %u old %u\n",
>>   	     newmap->m_epoch, oldmap->m_epoch);
>>   
>> +	if (newmap->m_info) {
>> +		for (i = 0; i < newmap->m_info->num_export_targets; i++)
>> +			set_bit(newmap->m_info->export_targets[i], targets);
>> +	}
>> +
> I wasn't aware you could exceed the size of the first unsigned long in
> the array with the atomic bitops handlers. Looking at the helpers
> themselves though, I don't see why this wouldn't work. Ok!
>
>>   	for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
>>   		if (!mdsc->sessions[i])
>>   			continue;
>> @@ -4257,6 +4264,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>>   		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
>>   		    newstate >= CEPH_MDS_STATE_RECONNECT) {
>>   			mutex_unlock(&mdsc->mutex);
>> +			clear_bit(i, targets);
>>   			send_mds_reconnect(mdsc, s);
>>   			mutex_lock(&mdsc->mutex);
>>   		}
>> @@ -4279,6 +4287,51 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>>   		}
>>   	}
>>   
>> +	/*
>> +	 * Only open and reconnect sessions that don't exist yet.
>> +	 */
>> +	for (i = 0; i < newmap->possible_max_rank; i++) {
>> +		/*
>> +		 * If the importing MDS crashed just after the
>> +		 * EImportStart journal was flushed, then when a
>> +		 * standby MDS takes over and replays the
>> +		 * EImportStart journal, it will wait for the
>> +		 * client to reconnect, but the client may not
>> +		 * have registered/opened a session with it yet.
>> +		 *
>> +		 * Try to reconnect to that MDS daemon if its
>> +		 * rank is in the export targets bitmap and it
>> +		 * is in the up:reconnect state.
>> +		 */
>> +		newstate = ceph_mdsmap_get_state(newmap, i);
>> +		if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
>> +			continue;
>> +
>> +		/*
>> +		 * In rare cases the session may already have been
>> +		 * registered and opened by a request that picked a
>> +		 * random MDS during the mdsc->mutex unlock/lock
>> +		 * gap below. But that MDS daemon will just queue
>> +		 * such requests and keep waiting for the client's
>> +		 * reconnection request in the up:reconnect state.
>> +		 */
>> +		s = __ceph_lookup_mds_session(mdsc, i);
>> +		if (likely(!s)) {
>> +			s = __open_export_target_session(mdsc, i);
>> +			if (IS_ERR(s)) {
>> +				err = PTR_ERR(s);
>> +				pr_err("failed to open export target session, err %d\n",
>> +				       err);
>> +				continue;
>> +			}
>> +		}
>> +		dout("send reconnect to export target mds.%d\n", i);
>> +		mutex_unlock(&mdsc->mutex);
>> +		send_mds_reconnect(mdsc, s);
>> +		ceph_put_mds_session(s);
>> +		mutex_lock(&mdsc->mutex);
>> +	}
>> +
>>   	for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
>>   		s = mdsc->sessions[i];
>>   		if (!s)
>> diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
>> index 3c444b9cb17b..d995cb02d30c 100644
>> --- a/fs/ceph/mdsmap.c
>> +++ b/fs/ceph/mdsmap.c
>> @@ -122,6 +122,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
>>   	int err;
>>   	u8 mdsmap_v;
>>   	u16 mdsmap_ev;
>> +	u32 target;
>>   
>>   	m = kzalloc(sizeof(*m), GFP_NOFS);
>>   	if (!m)
>> @@ -260,9 +261,12 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
>>   						       sizeof(u32), GFP_NOFS);
>>   			if (!info->export_targets)
>>   				goto nomem;
>> -			for (j = 0; j < num_export_targets; j++)
>> -				info->export_targets[j] =
>> -				       ceph_decode_32(&pexport_targets);
>> +			for (j = 0; j < num_export_targets; j++) {
>> +				target = ceph_decode_32(&pexport_targets);
>> +				if (target >= m->possible_max_rank)
>> +					goto corrupt;
>> +				info->export_targets[j] = target;
>> +			}
>>   		} else {
>>   			info->export_targets = NULL;
>>   		}
> Looks good. Merged into testing. I also reworded the changelog for
> (hopefully) better clarity. Xiubo, let me know if I didn't get the
> description right.

Your changes look much better. Thanks Jeff.



>
> Thanks!


