* [PATCH v2] ceph: try to reconnect to the export targets
From: xiubli @ 2021-08-17  3:44 UTC
  To: jlayton; +Cc: idryomov, pdonnell, ceph-devel, Xiubo Li

From: Xiubo Li <xiubli@redhat.com>

If the export MDS crashes just after the EImportStart journal is
flushed, then when a standby MDS takes over and replays the
EImportStart journal, it will wait for the client to reconnect, but
the client may never have registered/opened a session with it.

Try to reconnect to such MDSes if they are among the export targets
and in the RECONNECT state.

Signed-off-by: Xiubo Li <xiubli@redhat.com>
---

- check the export target rank when decoding the mdsmap instead of
calling BUG_ON()
- fix the case where sessions were already opened during the mutex
unlock/lock gap


 fs/ceph/mds_client.c | 63 +++++++++++++++++++++++++++++++++++++++++++-
 fs/ceph/mdsmap.c     | 10 ++++---
 2 files changed, 69 insertions(+), 4 deletions(-)

diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index e49dbeb6c06f..1e013fb09d73 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -4197,13 +4197,22 @@ static void check_new_map(struct ceph_mds_client *mdsc,
 			  struct ceph_mdsmap *newmap,
 			  struct ceph_mdsmap *oldmap)
 {
-	int i;
+	int i, err;
+	int *export_targets;
 	int oldstate, newstate;
 	struct ceph_mds_session *s;
+	struct ceph_mds_info *m_info;
 
 	dout("check_new_map new %u old %u\n",
 	     newmap->m_epoch, oldmap->m_epoch);
 
+	m_info = newmap->m_info;
+	export_targets = kcalloc(newmap->possible_max_rank, sizeof(int), GFP_NOFS);
+	if (export_targets && m_info) {
+		for (i = 0; i < m_info->num_export_targets; i++)
+			export_targets[m_info->export_targets[i]] = 1;
+	}
+
 	for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
 		if (!mdsc->sessions[i])
 			continue;
@@ -4257,6 +4266,8 @@ static void check_new_map(struct ceph_mds_client *mdsc,
 		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
 		    newstate >= CEPH_MDS_STATE_RECONNECT) {
 			mutex_unlock(&mdsc->mutex);
+			if (export_targets)
+				export_targets[i] = 0;
 			send_mds_reconnect(mdsc, s);
 			mutex_lock(&mdsc->mutex);
 		}
@@ -4279,6 +4290,54 @@ static void check_new_map(struct ceph_mds_client *mdsc,
 		}
 	}
 
+	/*
+	 * Only open and reconnect sessions that don't exist yet.
+	 */
+	for (i = 0; i < newmap->possible_max_rank; i++) {
+		if (unlikely(!export_targets))
+			break;
+
+		/*
+		 * If the import MDS crashed just after the
+		 * EImportStart journal was flushed, then when a
+		 * standby MDS takes over and replays the
+		 * EImportStart journal, the new MDS daemon will
+		 * wait for the client to reconnect, but the client
+		 * may never have registered/opened the session.
+		 *
+		 * Try to reconnect to that MDS daemon if its rank
+		 * is in the export_targets array and it is in the
+		 * up:reconnect state.
+		 */
+		newstate = ceph_mdsmap_get_state(newmap, i);
+		if (!export_targets[i] || newstate != CEPH_MDS_STATE_RECONNECT)
+			continue;
+
+		/*
+		 * In rare cases the session may already have been
+		 * registered and opened by requests that chose a
+		 * random MDS during the mdsc->mutex unlock/lock
+		 * gap below. But the MDS daemon will just queue
+		 * those requests and keep waiting for the client's
+		 * reconnection request in the up:reconnect state.
+		 */
+		s = __ceph_lookup_mds_session(mdsc, i);
+		if (likely(!s)) {
+			s = __open_export_target_session(mdsc, i);
+			if (IS_ERR(s)) {
+				err = PTR_ERR(s);
+				pr_err("failed to open export target session, err %d\n",
+				       err);
+				continue;
+			}
+		}
+		dout("send reconnect to export target mds.%d\n", i);
+		mutex_unlock(&mdsc->mutex);
+		send_mds_reconnect(mdsc, s);
+		mutex_lock(&mdsc->mutex);
+		ceph_put_mds_session(s);
+	}
+
 	for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
 		s = mdsc->sessions[i];
 		if (!s)
@@ -4293,6 +4352,8 @@ static void check_new_map(struct ceph_mds_client *mdsc,
 			__open_export_target_sessions(mdsc, s);
 		}
 	}
+
+	kfree(export_targets);
 }
 
 
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 3c444b9cb17b..d995cb02d30c 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -122,6 +122,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
 	int err;
 	u8 mdsmap_v;
 	u16 mdsmap_ev;
+	u32 target;
 
 	m = kzalloc(sizeof(*m), GFP_NOFS);
 	if (!m)
@@ -260,9 +261,12 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
 						       sizeof(u32), GFP_NOFS);
 			if (!info->export_targets)
 				goto nomem;
-			for (j = 0; j < num_export_targets; j++)
-				info->export_targets[j] =
-				       ceph_decode_32(&pexport_targets);
+			for (j = 0; j < num_export_targets; j++) {
+				target = ceph_decode_32(&pexport_targets);
+				if (target >= m->possible_max_rank)
+					goto corrupt;
+				info->export_targets[j] = target;
+			}
 		} else {
 			info->export_targets = NULL;
 		}
-- 
2.27.0



* Re: [PATCH v2] ceph: try to reconnect to the export targets
From: Jeff Layton @ 2021-08-17 16:18 UTC
  To: xiubli; +Cc: idryomov, pdonnell, ceph-devel

On Tue, 2021-08-17 at 11:44 +0800, xiubli@redhat.com wrote:
> From: Xiubo Li <xiubli@redhat.com>
> 
> If the export MDS crashes just after the EImportStart journal is
> flushed, then when a standby MDS takes over and replays the
> EImportStart journal, it will wait for the client to reconnect, but
> the client may never have registered/opened a session with it.
> 
> Try to reconnect to such MDSes if they are among the export targets
> and in the RECONNECT state.
> 
> Signed-off-by: Xiubo Li <xiubli@redhat.com>
> ---
> 
> - check the export target rank when decoding the mdsmap instead of
> calling BUG_ON()
> - fix the case where sessions were already opened during the mutex
> unlock/lock gap
> 
> 
>  fs/ceph/mds_client.c | 63 +++++++++++++++++++++++++++++++++++++++++++-
>  fs/ceph/mdsmap.c     | 10 ++++---
>  2 files changed, 69 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
> index e49dbeb6c06f..1e013fb09d73 100644
> --- a/fs/ceph/mds_client.c
> +++ b/fs/ceph/mds_client.c
> @@ -4197,13 +4197,22 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>  			  struct ceph_mdsmap *newmap,
>  			  struct ceph_mdsmap *oldmap)
>  {
> -	int i;
> +	int i, err;
> +	int *export_targets;
>  	int oldstate, newstate;
>  	struct ceph_mds_session *s;
> +	struct ceph_mds_info *m_info;
>  
>  	dout("check_new_map new %u old %u\n",
>  	     newmap->m_epoch, oldmap->m_epoch);
>  
> +	m_info = newmap->m_info;
> +	export_targets = kcalloc(newmap->possible_max_rank, sizeof(int), GFP_NOFS);

This allocation could fail under low-memory conditions, particularly
since it's GFP_NOFS. One idea would be to make this function return int
so you can just return -ENOMEM if the allocation fails.

Is there a hard max to possible_max_rank? If so and it's not that big,
then another possibility would be to just declare this array on the
stack.

Also, since this is just used as a flag, making an array of bools would
reduce the size of the allocation by a factor of 4.
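
Totally untested, but a rough sketch of the bool-array plus int-return
idea (this assumes check_new_map()'s callers are updated to propagate
the error):

static int check_new_map(struct ceph_mds_client *mdsc,
			 struct ceph_mdsmap *newmap,
			 struct ceph_mdsmap *oldmap)
{
	bool *export_targets;
	...
	/* one flag per possible rank; kcalloc zeroes the array */
	export_targets = kcalloc(newmap->possible_max_rank,
				 sizeof(bool), GFP_NOFS);
	if (!export_targets)
		return -ENOMEM;
	...
	kfree(export_targets);
	return 0;
}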

> +	if (export_targets && m_info) {
> +		for (i = 0; i < m_info->num_export_targets; i++)
> +			export_targets[m_info->export_targets[i]] = 1;
> +	}
> +

If you reverse the sense of the flags then you wouldn't need to
initialize the array at all (assuming you still use kcalloc).

>  	for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
>  		if (!mdsc->sessions[i])
>  			continue;
> @@ -4257,6 +4266,8 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>  		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
>  		    newstate >= CEPH_MDS_STATE_RECONNECT) {
>  			mutex_unlock(&mdsc->mutex);
> +			if (export_targets)
> +				export_targets[i] = 0;
>  			send_mds_reconnect(mdsc, s);
>  			mutex_lock(&mdsc->mutex);
>  		}
> @@ -4279,6 +4290,54 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>  		}
>  	}
>  
> +	/*
> +	 * Only open and reconnect sessions that don't exist yet.
> +	 */
> +	for (i = 0; i < newmap->possible_max_rank; i++) {
> +		if (unlikely(!export_targets))
> +			break;
> +
> +		/*
> +		 * If the import MDS crashed just after the
> +		 * EImportStart journal was flushed, then when a
> +		 * standby MDS takes over and replays the
> +		 * EImportStart journal, the new MDS daemon will
> +		 * wait for the client to reconnect, but the client
> +		 * may never have registered/opened the session.
> +		 *
> +		 * Try to reconnect to that MDS daemon if its rank
> +		 * is in the export_targets array and it is in the
> +		 * up:reconnect state.
> +		 */
> +		newstate = ceph_mdsmap_get_state(newmap, i);
> +		if (!export_targets[i] || newstate != CEPH_MDS_STATE_RECONNECT)
> +			continue;
> +
> +		/*
> +		 * In rare cases the session may already have been
> +		 * registered and opened by requests that chose a
> +		 * random MDS during the mdsc->mutex unlock/lock
> +		 * gap below. But the MDS daemon will just queue
> +		 * those requests and keep waiting for the client's
> +		 * reconnection request in the up:reconnect state.
> +		 */
> +		s = __ceph_lookup_mds_session(mdsc, i);
> +		if (likely(!s)) {
> +			s = __open_export_target_session(mdsc, i);
> +			if (IS_ERR(s)) {
> +				err = PTR_ERR(s);
> +				pr_err("failed to open export target session, err %d\n",
> +				       err);
> +				continue;
> +			}
> +		}
> +		dout("send reconnect to export target mds.%d\n", i);
> +		mutex_unlock(&mdsc->mutex);
> +		send_mds_reconnect(mdsc, s);
> +		mutex_lock(&mdsc->mutex);
> +		ceph_put_mds_session(s);

You can put the mds session before you re-take the mutex.
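
i.e. something like this (untested):

		dout("send reconnect to export target mds.%d\n", i);
		mutex_unlock(&mdsc->mutex);
		send_mds_reconnect(mdsc, s);
		/* drop the session ref before re-taking the mutex */
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);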

> +	}
> +
>  	for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
>  		s = mdsc->sessions[i];
>  		if (!s)
> @@ -4293,6 +4352,8 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>  			__open_export_target_sessions(mdsc, s);
>  		}
>  	}
> +
> +	kfree(export_targets);
>  }
>  
>  
> diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
> index 3c444b9cb17b..d995cb02d30c 100644
> --- a/fs/ceph/mdsmap.c
> +++ b/fs/ceph/mdsmap.c
> @@ -122,6 +122,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
>  	int err;
>  	u8 mdsmap_v;
>  	u16 mdsmap_ev;
> +	u32 target;
>  
>  	m = kzalloc(sizeof(*m), GFP_NOFS);
>  	if (!m)
> @@ -260,9 +261,12 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
>  						       sizeof(u32), GFP_NOFS);
>  			if (!info->export_targets)
>  				goto nomem;
> -			for (j = 0; j < num_export_targets; j++)
> -				info->export_targets[j] =
> -				       ceph_decode_32(&pexport_targets);
> +			for (j = 0; j < num_export_targets; j++) {
> +				target = ceph_decode_32(&pexport_targets);
> +				if (target >= m->possible_max_rank)
> +					goto corrupt;
> +				info->export_targets[j] = target;
> +			}
>  		} else {
>  			info->export_targets = NULL;
>  		}

-- 
Jeff Layton <jlayton@kernel.org>



* Re: [PATCH v2] ceph: try to reconnect to the export targets
From: Xiubo Li @ 2021-08-18  1:20 UTC
  To: Jeff Layton; +Cc: idryomov, pdonnell, ceph-devel


On 8/18/21 12:18 AM, Jeff Layton wrote:
> On Tue, 2021-08-17 at 11:44 +0800, xiubli@redhat.com wrote:
>> From: Xiubo Li <xiubli@redhat.com>
>>
>> If the export MDS crashes just after the EImportStart journal is
>> flushed, then when a standby MDS takes over and replays the
>> EImportStart journal, it will wait for the client to reconnect, but
>> the client may never have registered/opened a session with it.
>>
>> Try to reconnect to such MDSes if they are among the export targets
>> and in the RECONNECT state.
>>
>> Signed-off-by: Xiubo Li <xiubli@redhat.com>
>> ---
>>
>> - check the export target rank when decoding the mdsmap instead of
>> calling BUG_ON()
>> - fix the case where sessions were already opened during the mutex
>> unlock/lock gap
>>
>>
>>   fs/ceph/mds_client.c | 63 +++++++++++++++++++++++++++++++++++++++++++-
>>   fs/ceph/mdsmap.c     | 10 ++++---
>>   2 files changed, 69 insertions(+), 4 deletions(-)
>>
>> diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
>> index e49dbeb6c06f..1e013fb09d73 100644
>> --- a/fs/ceph/mds_client.c
>> +++ b/fs/ceph/mds_client.c
>> @@ -4197,13 +4197,22 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>>   			  struct ceph_mdsmap *newmap,
>>   			  struct ceph_mdsmap *oldmap)
>>   {
>> -	int i;
>> +	int i, err;
>> +	int *export_targets;
>>   	int oldstate, newstate;
>>   	struct ceph_mds_session *s;
>> +	struct ceph_mds_info *m_info;
>>   
>>   	dout("check_new_map new %u old %u\n",
>>   	     newmap->m_epoch, oldmap->m_epoch);
>>   
>> +	m_info = newmap->m_info;
>> +	export_targets = kcalloc(newmap->possible_max_rank, sizeof(int), GFP_NOFS);
> This allocation could fail under low-memory conditions, particularly
> since it's GFP_NOFS. One idea would be to make this function return int
> so you can just return -ENOMEM if the allocation fails.
>
> Is there a hard max to possible_max_rank? If so and it's not that big,
> then another possibility would be to just declare this array on the
> stack.
>
> Also, since this is just used as a flag, making an array of bools would
> reduce the size of the allocation by a factor of 4.

I think the hard max is CEPH_MAX_MDS, which is 0x100. I will try a bitmap on the stack.
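
Maybe something like this (untested) sketch, assuming CEPH_MAX_MDS is
a hard upper bound for possible_max_rank:

	DECLARE_BITMAP(targets, CEPH_MAX_MDS);	/* 0x100 bits == 32 bytes */

	bitmap_zero(targets, CEPH_MAX_MDS);
	if (m_info) {
		for (i = 0; i < m_info->num_export_targets; i++)
			set_bit(m_info->export_targets[i], targets);
	}
	...
	/* later, in the reconnect loop */
	if (!test_bit(i, targets) || newstate != CEPH_MDS_STATE_RECONNECT)
		continue;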



>> +	if (export_targets && m_info) {
>> +		for (i = 0; i < m_info->num_export_targets; i++)
>> +			export_targets[m_info->export_targets[i]] = 1;
>> +	}
>> +
> If you reverse the sense of the flags then you wouldn't need to
> initialize the array at all (assuming you still use kcalloc).

For example, if the size of the export_targets array is 100,
num_export_targets is 1, and m_info->export_targets[0] is 7, then we
must clear all the other 99 flags one by one?


>
>>   	for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
>>   		if (!mdsc->sessions[i])
>>   			continue;
>> @@ -4257,6 +4266,8 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>>   		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
>>   		    newstate >= CEPH_MDS_STATE_RECONNECT) {
>>   			mutex_unlock(&mdsc->mutex);
>> +			if (export_targets)
>> +				export_targets[i] = 0;
>>   			send_mds_reconnect(mdsc, s);
>>   			mutex_lock(&mdsc->mutex);
>>   		}
>> @@ -4279,6 +4290,54 @@ static void check_new_map(struct ceph_mds_client *mdsc,
>>   		}
>>   	}
>>   
>> +	/*
>> +	 * Only open and reconnect sessions that don't exist yet.
>> +	 */
>> +	for (i = 0; i < newmap->possible_max_rank; i++) {
>> +		if (unlikely(!export_targets))
>> +			break;
>> +
>> +		/*
>> +		 * If the import MDS crashed just after the
>> +		 * EImportStart journal was flushed, then when a
>> +		 * standby MDS takes over and replays the
>> +		 * EImportStart journal, the new MDS daemon will
>> +		 * wait for the client to reconnect, but the client
>> +		 * may never have registered/opened the session.
>> +		 *
>> +		 * Try to reconnect to that MDS daemon if its rank
>> +		 * is in the export_targets array and it is in the
>> +		 * up:reconnect state.
>> +		 */
>> +		newstate = ceph_mdsmap_get_state(newmap, i);
>> +		if (!export_targets[i] || newstate != CEPH_MDS_STATE_RECONNECT)
>> +			continue;
>> +
>> +		/*
>> +		 * In rare cases the session may already have been
>> +		 * registered and opened by requests that chose a
>> +		 * random MDS during the mdsc->mutex unlock/lock
>> +		 * gap below. But the MDS daemon will just queue
>> +		 * those requests and keep waiting for the client's
>> +		 * reconnection request in the up:reconnect state.
>> +		 */
>> +		s = __ceph_lookup_mds_session(mdsc, i);
>> +		if (likely(!s)) {
>> +			s = __open_export_target_session(mdsc, i);
>> +			if (IS_ERR(s)) {
>> +				err = PTR_ERR(s);
>> +				pr_err("failed to open export target session, err %d\n",
>> +				       err);
>> +				continue;
>> +			}
>> +		}
>> +		dout("send reconnect to export target mds.%d\n", i);
>> +		mutex_unlock(&mdsc->mutex);
>> +		send_mds_reconnect(mdsc, s);
>> +		mutex_lock(&mdsc->mutex);
>> +		ceph_put_mds_session(s);
> You can put the mds session before you re-take the mutex.

Will fix it.

Thanks



