From: xiubli@redhat.com
To: jlayton@kernel.org
Cc: idryomov@gmail.com, pdonnell@redhat.com,
ceph-devel@vger.kernel.org, Xiubo Li <xiubli@redhat.com>
Subject: [PATCH v2] ceph: try to reconnect to the export targets
Date: Tue, 17 Aug 2021 11:44:45 +0800
Message-ID: <20210817034445.405663-1-xiubli@redhat.com>
From: Xiubo Li <xiubli@redhat.com>
In case the export MDS crashes just after the EImportStart journal
is flushed, a standby MDS takes over and, while replaying the
EImportStart journal, waits for the client to reconnect. But the
client may never have registered/opened a session to that MDS yet.
Try to reconnect to such MDSes if they are among the export targets
and in the RECONNECT state.
Signed-off-by: Xiubo Li <xiubli@redhat.com>
---
- validate the export target rank when decoding the mdsmap instead of
  using BUG_ON
- fix the case where sessions have already been opened during the
  mdsc->mutex unlock/lock gap
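For reviewers who want the gist of the mds_client.c hunk before reading
the diff: the snippet below is a condensed, standalone userspace sketch
of the new reconnect pass, not the kernel code itself; the struct and
helper names are simplified stand-ins.

/*
 * Standalone sketch of the new pass in check_new_map(): mark every
 * rank listed as an export target, then for each marked rank that
 * the new map reports in up:reconnect, open a session if none
 * exists and send a reconnect. All names here are mocks.
 */
#include <stdio.h>
#include <stdlib.h>

#define STATE_RECONNECT 8		/* stand-in for CEPH_MDS_STATE_RECONNECT */

struct mock_map {
	int max_rank;			/* stand-in for possible_max_rank */
	int *state;			/* per-rank MDS state */
	int num_export_targets;
	int *export_targets;		/* ranks that may expect a reconnect */
};

static int session_open[16];		/* mock session table, indexed by rank */

static void reconnect_pass(const struct mock_map *newmap)
{
	int *targets = calloc(newmap->max_rank, sizeof(int));
	int i;

	if (!targets)
		return;
	for (i = 0; i < newmap->num_export_targets; i++)
		targets[newmap->export_targets[i]] = 1;

	for (i = 0; i < newmap->max_rank; i++) {
		if (!targets[i] || newmap->state[i] != STATE_RECONNECT)
			continue;
		if (!session_open[i]) {
			/* mock of __open_export_target_session() */
			session_open[i] = 1;
			printf("open session to mds.%d\n", i);
		}
		printf("send reconnect to export target mds.%d\n", i);
	}
	free(targets);
}

int main(void)
{
	int states[4] = { 0, STATE_RECONNECT, 0, STATE_RECONNECT };
	int exports[2] = { 1, 2 };	/* mds.3 reconnects but isn't a target */
	struct mock_map m = { 4, states, 2, exports };

	reconnect_pass(&m);		/* only mds.1 qualifies */
	return 0;
}

Running it acts on mds.1 only: mds.2 is a target but not in
up:reconnect, and mds.3 is reconnecting but was never an export target.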
fs/ceph/mds_client.c | 63 +++++++++++++++++++++++++++++++++++++++++++-
fs/ceph/mdsmap.c | 10 ++++---
2 files changed, 69 insertions(+), 4 deletions(-)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index e49dbeb6c06f..1e013fb09d73 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -4197,13 +4197,22 @@ static void check_new_map(struct ceph_mds_client *mdsc,
struct ceph_mdsmap *newmap,
struct ceph_mdsmap *oldmap)
{
- int i;
+ int i, err;
+ int *export_targets;
int oldstate, newstate;
struct ceph_mds_session *s;
+ struct ceph_mds_info *m_info;
dout("check_new_map new %u old %u\n",
newmap->m_epoch, oldmap->m_epoch);
+ m_info = newmap->m_info;
+ export_targets = kcalloc(newmap->possible_max_rank, sizeof(int), GFP_NOFS);
+ if (export_targets && m_info) {
+ for (i = 0; i < m_info->num_export_targets; i++)
+ export_targets[m_info->export_targets[i]] = 1;
+ }
+
for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
if (!mdsc->sessions[i])
continue;
@@ -4257,6 +4266,8 @@ static void check_new_map(struct ceph_mds_client *mdsc,
if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
newstate >= CEPH_MDS_STATE_RECONNECT) {
mutex_unlock(&mdsc->mutex);
+ if (export_targets)
+ export_targets[i] = 0;
send_mds_reconnect(mdsc, s);
mutex_lock(&mdsc->mutex);
}
@@ -4279,6 +4290,54 @@ static void check_new_map(struct ceph_mds_client *mdsc,
}
}
+ /*
+ * Only open and reconnect sessions that don't exist yet.
+ */
+ for (i = 0; i < newmap->possible_max_rank; i++) {
+ if (unlikely(!export_targets))
+ break;
+
+ /*
+ * In case the import MDS crashes just after the
+ * EImportStart journal is flushed, a standby MDS
+ * takes over and, while replaying the EImportStart
+ * journal, the new MDS daemon will wait for the
+ * client to reconnect, but the client may never
+ * have registered/opened a session yet.
+ *
+ * Try to reconnect to that MDS daemon if its rank
+ * is in the export_targets array and it is in the
+ * up:reconnect state.
+ */
+ newstate = ceph_mdsmap_get_state(newmap, i);
+ if (!export_targets[i] || newstate != CEPH_MDS_STATE_RECONNECT)
+ continue;
+
+ /*
+ * In rare cases the session may already have been
+ * registered and opened by requests that chose a
+ * random MDS during the mdsc->mutex unlock/lock
+ * gap below. That MDS daemon will just queue those
+ * requests and keep waiting for the client's
+ * reconnect request in the up:reconnect state.
+ */
+ s = __ceph_lookup_mds_session(mdsc, i);
+ if (likely(!s)) {
+ s = __open_export_target_session(mdsc, i);
+ if (IS_ERR(s)) {
+ err = PTR_ERR(s);
+ pr_err("failed to open export target session, err %d\n",
+ err);
+ continue;
+ }
+ }
+ dout("send reconnect to export target mds.%d\n", i);
+ mutex_unlock(&mdsc->mutex);
+ send_mds_reconnect(mdsc, s);
+ mutex_lock(&mdsc->mutex);
+ ceph_put_mds_session(s);
+ }
+
for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
s = mdsc->sessions[i];
if (!s)
@@ -4293,6 +4352,8 @@ static void check_new_map(struct ceph_mds_client *mdsc,
__open_export_target_sessions(mdsc, s);
}
}
+
+ kfree(export_targets);
}
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 3c444b9cb17b..d995cb02d30c 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -122,6 +122,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
int err;
u8 mdsmap_v;
u16 mdsmap_ev;
+ u32 target;
m = kzalloc(sizeof(*m), GFP_NOFS);
if (!m)
@@ -260,9 +261,12 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
sizeof(u32), GFP_NOFS);
if (!info->export_targets)
goto nomem;
- for (j = 0; j < num_export_targets; j++)
- info->export_targets[j] =
- ceph_decode_32(&pexport_targets);
+ for (j = 0; j < num_export_targets; j++) {
+ target = ceph_decode_32(&pexport_targets);
+ if (target >= m->possible_max_rank)
+ goto corrupt;
+ info->export_targets[j] = target;
+ }
} else {
info->export_targets = NULL;
}
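The mdsmap.c hunk boils down to a bounds check on untrusted wire data.
Here is a standalone userspace sketch of the same pattern, with mock
names rather than the kernel decode helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Reject any decoded export target rank that falls outside the
 * map's possible_max_rank, instead of trusting it (or BUG_ON).
 */
static bool decode_export_targets(const uint32_t *wire, int n,
				  uint32_t max_rank, uint32_t *out)
{
	int j;

	for (j = 0; j < n; j++) {
		if (wire[j] >= max_rank)
			return false;	/* corrupt map: fail gracefully */
		out[j] = wire[j];
	}
	return true;
}

int main(void)
{
	uint32_t wire[3] = { 0, 1, 42 };	/* 42 exceeds max_rank of 4 */
	uint32_t out[3];

	if (!decode_export_targets(wire, 3, 4, out))
		fprintf(stderr, "corrupt mdsmap: export target out of range\n");
	return 0;
}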
--
2.27.0