From: Lidong Zhong <lidong.zhong@suse.com>
To: lvm-devel@redhat.com
Subject: [RFC PATCH] lvmlockd: purge the lock resources left in previous lockspace
Date: Fri, 30 Sep 2022 21:57:08 +0800	[thread overview]
Message-ID: <ff6e311a-5e87-2fe8-7c11-46aa7aaa048e@suse.com> (raw)

If lvmlockd in a cluster is killed accidentally, or exits for any
other reason, its lock resources become orphaned in the VG lockspace.
When the cluster manager restarts the daemon, the LVs may be inactive
because of the resource scheduling policy, and the orphaned lock
resources are then omitted during the adoption process. This patch
purges the lock resources left in the previous lockspace, so that
subsequent actions can work again.
---
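Note for reviewers: the purge itself reduces to a single libdlm call
against the already-running lockspace. A minimal standalone sketch of
the same idea follows (illustration only, not part of the patch; the
lockspace name "lvm_vg0" and nodeid 1 are hypothetical, and lvmlockd
derives the real nodeid from configfs via get_local_nodeid() as in the
diff below). Build with: gcc purge.c -ldlm

/* Sketch only: drop this node's orphan locks in one dlm lockspace. */
#include <stdio.h>
#include <libdlm.h>

int main(void)
{
	dlm_lshandle_t ls;

	/* Attach to the lockspace left behind by the killed daemon. */
	if (!(ls = dlm_open_lockspace("lvm_vg0"))) {
		perror("dlm_open_lockspace");
		return 1;
	}

	/* pid 0 asks dlm to purge orphan locks for the given nodeid. */
	if (dlm_ls_purge(ls, 1, 0)) {
		perror("dlm_ls_purge");
		dlm_close_lockspace(ls);
		return 1;
	}

	dlm_close_lockspace(ls);
	return 0;
}

The patch does the same per rejoined lockspace during adoption, using
the daemon's existing dlm handle.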
 daemons/lvmlockd/lvmlockd-core.c     | 12 ++++
 daemons/lvmlockd/lvmlockd-dlm.c      | 75 ++++++++++++++++++++++++++++
 daemons/lvmlockd/lvmlockd-internal.h |  6 ++
 3 files changed, 93 insertions(+)

diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index 6d0d4d98c..054bd9a10 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -5955,6 +5955,18 @@ static void adopt_locks(void)


 	/* FIXME: purge any remaining orphan locks in each rejoined ls? */
+	/* Try to purge the orphan locks when the lock manager is dlm. */
+	if (lm_support_dlm() && lm_is_running_dlm()) {
+		list_for_each_entry(ls, &ls_found, list) {
+			pthread_mutex_lock(&lockspaces_mutex);
+			ls1 = find_lockspace_name(ls->name);
+			if (ls1) {
+				log_debug("ls: %s purge locks", ls->name);
+				lm_purge_locks_dlm(ls1);
+			}
+			pthread_mutex_unlock(&lockspaces_mutex);
+		}
+	}

 	if (count_start_fail || count_adopt_fail)
 		goto fail;
diff --git a/daemons/lvmlockd/lvmlockd-dlm.c b/daemons/lvmlockd/lvmlockd-dlm.c
index 1305c3dc2..f63f5ec82 100644
--- a/daemons/lvmlockd/lvmlockd-dlm.c
+++ b/daemons/lvmlockd/lvmlockd-dlm.c
@@ -220,6 +220,81 @@ int lm_prepare_lockspace_dlm(struct lockspace *ls)
 	return 0;
 }

+#define DLM_COMMS_PATH "/sys/kernel/config/dlm/cluster/comms"
+#define LOCK_LINE_MAX 1024
+static int get_local_nodeid(void)
+{
+	struct dirent *de;
+	DIR *ls_dir;
+	char ls_comms_path[PATH_MAX];
+	FILE *file = NULL;
+	char line[LOCK_LINE_MAX];
+	int rv = -1, val;
+
+	if (!(ls_dir = opendir(DLM_COMMS_PATH)))
+		return -ECONNREFUSED;
+
+	/* Find the comm entry with local=1 and return its nodeid. */
+	while ((de = readdir(ls_dir))) {
+		if (de->d_name[0] == '.')
+			continue;
+		memset(ls_comms_path, 0, sizeof(ls_comms_path));
+		snprintf(ls_comms_path, PATH_MAX, "%s/%s/local",
+			 DLM_COMMS_PATH, de->d_name);
+		file = fopen(ls_comms_path, "r");
+		if (!file)
+			continue;
+		if (!fgets(line, LOCK_LINE_MAX, file)) {
+			fclose(file);
+			continue;
+		}
+		fclose(file);
+		if ((sscanf(line, "%d", &val) != 1) || (val != 1))
+			continue;
+		memset(ls_comms_path, 0, sizeof(ls_comms_path));
+		snprintf(ls_comms_path, PATH_MAX, "%s/%s/nodeid",
+			 DLM_COMMS_PATH, de->d_name);
+		file = fopen(ls_comms_path, "r");
+		if (!file)
+			continue;
+		if (fgets(line, LOCK_LINE_MAX, file) &&
+		    (sscanf(line, "%d", &val) == 1))
+			rv = val;
+		fclose(file);
+		break;
+	}
+
+	if (closedir(ls_dir))
+		log_error("get_local_nodeid closedir error");
+	return rv;
+}
+
+int lm_purge_locks_dlm(struct lockspace *ls)
+{
+	struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
+	int nodeid;
+	int rv = -1;
+
+	if (!lmd || !lmd->dh) {
+		log_error("purge_locks_dlm %s no dlm_handle_t error", ls->name);
+		goto fail;
+	}
+
+	nodeid = get_local_nodeid();
+	if (nodeid < 0) {
+		log_error("failed to get local nodeid");
+		goto fail;
+	}
+	if (dlm_ls_purge(lmd->dh, nodeid, 0)) {
+		log_error("purge_locks_dlm %s error", ls->name);
+		goto fail;
+	}
+
+	rv = 0;
+fail:
+	return rv;
+}
+
 int lm_add_lockspace_dlm(struct lockspace *ls, int adopt)
 {
 	struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
diff --git a/daemons/lvmlockd/lvmlockd-internal.h b/daemons/lvmlockd/lvmlockd-internal.h
index ad32eb3a4..dd59b6a5d 100644
--- a/daemons/lvmlockd/lvmlockd-internal.h
+++ b/daemons/lvmlockd/lvmlockd-internal.h
@@ -392,6 +392,7 @@ static inline const char *mode_str(int x)
 int lm_init_vg_dlm(char *ls_name, char *vg_name, uint32_t flags, char *vg_args);
 int lm_prepare_lockspace_dlm(struct lockspace *ls);
 int lm_add_lockspace_dlm(struct lockspace *ls, int adopt);
+int lm_purge_locks_dlm(struct lockspace *ls);
 int lm_rem_lockspace_dlm(struct lockspace *ls, int free_vg);
 int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
 		struct val_blk *vb_out, int adopt);
@@ -429,6 +430,11 @@ static inline int lm_add_lockspace_dlm(struct lockspace *ls, int adopt)
 	return -1;
 }

+static inline int lm_purge_locks_dlm(struct lockspace *ls)
+{
+	return -1;
+}
+
 static inline int lm_rem_lockspace_dlm(struct lockspace *ls, int free_vg)
 {
 	return -1;
-- 
2.35.3


