All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v6.9-rc1 01/10] dlm: fix user space lock decision to copy lvb
@ 2024-03-28 15:48 Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 02/10] fs: dlm: Simplify the allocation of slab caches in dlm_midcomms_cache_create Alexander Aring
                   ` (8 more replies)
  0 siblings, 9 replies; 10+ messages in thread
From: Alexander Aring @ 2024-03-28 15:48 UTC (permalink / raw)
  To: teigland; +Cc: gfs2

This patch fixes the copy lvb decision in case of user space DLM lock
requests. Currently the granted mode and requested mode are the same when
evaluating the dlm_lvb_operations matrix. That reflects the diagonal of
the dlm_lvb_operations matrix. Luckily it will mostly return a non-zero
result on its diagonal, but in case of requested mode PW or EX the
diagonal returns 0; depending on the previously granted mode, the user
might get invalid lvb data that is marked as valid.

This patch will move the decision if the lvb area should be copied or
not to the enqueue callback functionality because the granted mode is
known there. It also removes any lkb reference in the user callback
result delivery functionality that is required for a future fix.

Fixes: 61bed0baa4db ("fs: dlm: use a non-static queue for callbacks")
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/ast.c          | 14 ++++++++++++++
 fs/dlm/dlm_internal.h |  1 +
 fs/dlm/user.c         | 15 ++-------------
 3 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 1f2f70a1b824..decedc4ee15f 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -12,6 +12,7 @@
 #include <trace/events/dlm.h>
 
 #include "dlm_internal.h"
+#include "lvb_table.h"
 #include "memory.h"
 #include "lock.h"
 #include "user.h"
@@ -42,6 +43,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
 	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
 	struct dlm_callback *cb;
+	int copy_lvb = 0;
 	int prev_mode;
 
 	if (flags & DLM_CB_BAST) {
@@ -73,6 +75,17 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 				goto out;
 			}
 		}
+	} else if (flags & DLM_CB_CAST) {
+		if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
+			if (lkb->lkb_last_cast)
+				prev_mode = lkb->lkb_last_cb->mode;
+			else
+				prev_mode = -1;
+
+			if (!status && lkb->lkb_lksb->sb_lvbptr &&
+			    dlm_lvb_operations[prev_mode + 1][mode + 1])
+				copy_lvb = 1;
+		}
 	}
 
 	cb = dlm_allocate_cb();
@@ -85,6 +98,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 	cb->mode = mode;
 	cb->sb_status = status;
 	cb->sb_flags = (sbflags & 0x000000FF);
+	cb->copy_lvb = copy_lvb;
 	kref_init(&cb->ref);
 	if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags))
 		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 3b4dbce849f0..a9137c90f348 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -222,6 +222,7 @@ struct dlm_callback {
 	int			sb_status;	/* copy to lksb status */
 	uint8_t			sb_flags;	/* copy to lksb flags */
 	int8_t			mode; /* rq mode of bast, gr mode of cast */
+	int			copy_lvb;
 
 	struct list_head	list;
 	struct kref		ref;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 9f9b68448830..12a483deeef5 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -21,7 +21,6 @@
 #include "dlm_internal.h"
 #include "lockspace.h"
 #include "lock.h"
-#include "lvb_table.h"
 #include "user.h"
 #include "ast.h"
 #include "config.h"
@@ -806,8 +805,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 	struct dlm_lkb *lkb;
 	DECLARE_WAITQUEUE(wait, current);
 	struct dlm_callback *cb;
-	int rv, ret, copy_lvb = 0;
-	int old_mode, new_mode;
+	int rv, ret;
 
 	if (count == sizeof(struct dlm_device_version)) {
 		rv = copy_version_to_user(buf, count);
@@ -864,9 +862,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 
 	lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list);
 
-	/* rem_lkb_callback sets a new lkb_last_cast */
-	old_mode = lkb->lkb_last_cast->mode;
-
 	rv = dlm_dequeue_lkb_callback(lkb, &cb);
 	switch (rv) {
 	case DLM_DEQUEUE_CALLBACK_EMPTY:
@@ -895,12 +890,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 	if (cb->flags & DLM_CB_BAST) {
 		trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode);
 	} else if (cb->flags & DLM_CB_CAST) {
-		new_mode = cb->mode;
-
-		if (!cb->sb_status && lkb->lkb_lksb->sb_lvbptr &&
-		    dlm_lvb_operations[old_mode + 1][new_mode + 1])
-			copy_lvb = 1;
-
 		lkb->lkb_lksb->sb_status = cb->sb_status;
 		lkb->lkb_lksb->sb_flags = cb->sb_flags;
 		trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
@@ -908,7 +897,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 
 	ret = copy_result_to_user(lkb->lkb_ua,
 				  test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
-				  cb->flags, cb->mode, copy_lvb, buf, count);
+				  cb->flags, cb->mode, cb->copy_lvb, buf, count);
 
 	kref_put(&cb->ref, dlm_release_callback);
 
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v6.9-rc1 02/10] fs: dlm: Simplify the allocation of slab caches in dlm_midcomms_cache_create
  2024-03-28 15:48 [PATCH v6.9-rc1 01/10] dlm: fix user space lock decision to copy lvb Alexander Aring
@ 2024-03-28 15:48 ` Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 03/10] fs: dlm: Simplify the allocation of slab caches in dlm_lowcomms_msg_cache_create Alexander Aring
                   ` (7 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Alexander Aring @ 2024-03-28 15:48 UTC (permalink / raw)
  To: teigland; +Cc: gfs2

From: Kunwu Chan <chentao@kylinos.cn>

Use the new KMEM_CACHE() macro instead of direct kmem_cache_create
to simplify the creation of SLAB caches.

Signed-off-by: Kunwu Chan <chentao@kylinos.cn>
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/midcomms.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index 2247ebb61be1..8e9920f1b48b 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -226,8 +226,7 @@ static DEFINE_MUTEX(close_lock);
 
 struct kmem_cache *dlm_midcomms_cache_create(void)
 {
-	return kmem_cache_create("dlm_mhandle", sizeof(struct dlm_mhandle),
-				 0, 0, NULL);
+	return KMEM_CACHE(dlm_mhandle, 0);
 }
 
 static inline const char *dlm_state_str(int state)
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v6.9-rc1 03/10] fs: dlm: Simplify the allocation of slab caches in dlm_lowcomms_msg_cache_create
  2024-03-28 15:48 [PATCH v6.9-rc1 01/10] dlm: fix user space lock decision to copy lvb Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 02/10] fs: dlm: Simplify the allocation of slab caches in dlm_midcomms_cache_create Alexander Aring
@ 2024-03-28 15:48 ` Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 04/10] dlm: put lkbs instead of force free Alexander Aring
                   ` (6 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Alexander Aring @ 2024-03-28 15:48 UTC (permalink / raw)
  To: teigland; +Cc: gfs2

From: Kunwu Chan <chentao@kylinos.cn>

Use the new KMEM_CACHE() macro instead of direct kmem_cache_create
to simplify the creation of SLAB caches.

Signed-off-by: Kunwu Chan <chentao@kylinos.cn>
Acked-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/lowcomms.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 6296c62c10fa..712165a1e567 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -248,7 +248,7 @@ struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void)
 
 struct kmem_cache *dlm_lowcomms_msg_cache_create(void)
 {
-	return kmem_cache_create("dlm_msg", sizeof(struct dlm_msg), 0, 0, NULL);
+	return KMEM_CACHE(dlm_msg, 0);
 }
 
 /* need to held writequeue_lock */
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v6.9-rc1 04/10] dlm: put lkbs instead of force free
  2024-03-28 15:48 [PATCH v6.9-rc1 01/10] dlm: fix user space lock decision to copy lvb Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 02/10] fs: dlm: Simplify the allocation of slab caches in dlm_midcomms_cache_create Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 03/10] fs: dlm: Simplify the allocation of slab caches in dlm_lowcomms_msg_cache_create Alexander Aring
@ 2024-03-28 15:48 ` Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 05/10] dlm: remove lkb from ast bast tracepoints Alexander Aring
                   ` (5 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Alexander Aring @ 2024-03-28 15:48 UTC (permalink / raw)
  To: teigland; +Cc: gfs2

This patch converts a force free of the lkb idr and switches to using the
lkbs' put functionality. If there are still references held due to the lkb
programming logic and its state, they will be dropped beforehand. Instead
of force freeing the lkbs of the idr, using the refcounters makes sure we
use the reference counters correctly. If we do that, then no rsb should be
left on the lockspace keep hash bucket, which is an additional check
added by this patch. All rsbs on the toss list should have a reference
counter of 1.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/lock.c      |  2 +-
 fs/dlm/lock.h      |  1 +
 fs/dlm/lockspace.c | 33 +++++++++++++++++++++++----------
 3 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index fd752dd03896..a39c2c49bff8 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1368,7 +1368,7 @@ static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
 	}
 }
 
-static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
+void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
 {
 	lkb->lkb_status = 0;
 	list_del(&lkb->lkb_statequeue);
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index b54e2cbbe6e2..853c3d3dc49d 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -60,6 +60,7 @@ int dlm_debug_add_lkb(struct dlm_ls *ls, uint32_t lkb_id, char *name, int len,
 		      int lkb_nodeid, unsigned int lkb_flags, int lkb_status);
 int dlm_debug_add_lkb_to_waiters(struct dlm_ls *ls, uint32_t lkb_id,
 				 int mstype, int to_nodeid);
+void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb);
 
 static inline int is_master(struct dlm_rsb *r)
 {
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 0455dddb0797..0f9d7b498602 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -737,14 +737,30 @@ static int lkb_idr_is_any(int id, void *p, void *data)
 	return 1;
 }
 
-static int lkb_idr_free(int id, void *p, void *data)
+/*
+ * No locking required, lockspace usage should be synchronized
+ * to have any activity anymore.
+ */
+static int lkb_idr_put(int id, void *p, void *data)
 {
 	struct dlm_lkb *lkb = p;
 
-	if (lkb->lkb_lvbptr && test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags))
-		dlm_free_lvb(lkb->lkb_lvbptr);
+	if (lkb->lkb_status)
+		del_lkb(lkb->lkb_resource, lkb);
 
-	dlm_free_lkb(lkb);
+	/*
+	 * Forcibly reset wait_count and associated refcount. The
+	 * wait_count will almost always be 1, but in case of an
+	 * overlapping unlock/cancel it could be 2: see where
+	 * add_to_waiters() finds the lkb is already on the waiters
+	 * list and does lkb_wait_count++; hold_lkb().
+	 */
+	while (lkb->lkb_wait_count) {
+		lkb->lkb_wait_count--;
+		dlm_put_lkb(lkb);
+	}
+
+	WARN_ON_ONCE(!dlm_put_lkb(lkb));
 	return 0;
 }
 
@@ -826,7 +842,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 	 * Free all lkb's in idr
 	 */
 
-	idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
+	idr_for_each(&ls->ls_lkbidr, lkb_idr_put, ls);
 	idr_destroy(&ls->ls_lkbidr);
 
 	/*
@@ -834,15 +850,12 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 	 */
 
 	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
-		while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) {
-			rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
-			rb_erase(n, &ls->ls_rsbtbl[i].keep);
-			dlm_free_rsb(rsb);
-		}
+		WARN_ON_ONCE(!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].keep));
 
 		while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) {
 			rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
 			rb_erase(n, &ls->ls_rsbtbl[i].toss);
+			WARN_ON_ONCE(kref_read(&rsb->res_ref) != 1);
 			dlm_free_rsb(rsb);
 		}
 	}
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v6.9-rc1 05/10] dlm: remove lkb from ast bast tracepoints
  2024-03-28 15:48 [PATCH v6.9-rc1 01/10] dlm: fix user space lock decision to copy lvb Alexander Aring
                   ` (2 preceding siblings ...)
  2024-03-28 15:48 ` [PATCH v6.9-rc1 04/10] dlm: put lkbs instead of force free Alexander Aring
@ 2024-03-28 15:48 ` Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 06/10] dlm: remove callback queue debugfs functionality Alexander Aring
                   ` (4 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Alexander Aring @ 2024-03-28 15:48 UTC (permalink / raw)
  To: teigland; +Cc: gfs2

This patch removes the lkb dependency from the ast and bast DLM
tracepoints to prepare to not hold a lkb reference when an ast or bast
is queued to be called. There is also no need to check if the
lkb_resource is set because it should always be the case in such
context.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/ast.c               |  9 ++++++--
 fs/dlm/user.c              |  9 ++++++--
 include/trace/events/dlm.h | 46 ++++++++++++++++----------------------
 3 files changed, 33 insertions(+), 31 deletions(-)

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index decedc4ee15f..dd7cca3c1472 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -171,6 +171,7 @@ void dlm_callback_work(struct work_struct *work)
 {
 	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+	struct dlm_rsb *rsb = lkb->lkb_resource;
 	void (*castfn) (void *astparam);
 	void (*bastfn) (void *astparam, int mode);
 	struct dlm_callback *cb;
@@ -190,14 +191,18 @@ void dlm_callback_work(struct work_struct *work)
 		bastfn = lkb->lkb_bastfn;
 
 		if (cb->flags & DLM_CB_BAST) {
-			trace_dlm_bast(ls, lkb, cb->mode);
+			trace_dlm_bast(ls->ls_global_id, lkb->lkb_id,
+				       cb->mode, rsb->res_name,
+				       rsb->res_length);
 			lkb->lkb_last_bast_time = ktime_get();
 			lkb->lkb_last_bast_mode = cb->mode;
 			bastfn(lkb->lkb_astparam, cb->mode);
 		} else if (cb->flags & DLM_CB_CAST) {
 			lkb->lkb_lksb->sb_status = cb->sb_status;
 			lkb->lkb_lksb->sb_flags = cb->sb_flags;
-			trace_dlm_ast(ls, lkb);
+			trace_dlm_ast(ls->ls_global_id, lkb->lkb_id,
+				      cb->sb_flags, cb->sb_status,
+				      rsb->res_name, rsb->res_length);
 			lkb->lkb_last_cast_time = ktime_get();
 			castfn(lkb->lkb_astparam);
 		}
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 12a483deeef5..6f99bbeeac9b 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -805,6 +805,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 	struct dlm_lkb *lkb;
 	DECLARE_WAITQUEUE(wait, current);
 	struct dlm_callback *cb;
+	struct dlm_rsb *rsb;
 	int rv, ret;
 
 	if (count == sizeof(struct dlm_device_version)) {
@@ -887,12 +888,16 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 	}
 	spin_unlock(&proc->asts_spin);
 
+	rsb = lkb->lkb_resource;
 	if (cb->flags & DLM_CB_BAST) {
-		trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode);
+		trace_dlm_bast(rsb->res_ls->ls_global_id, lkb->lkb_id,
+			       cb->mode, rsb->res_name, rsb->res_length);
 	} else if (cb->flags & DLM_CB_CAST) {
 		lkb->lkb_lksb->sb_status = cb->sb_status;
 		lkb->lkb_lksb->sb_flags = cb->sb_flags;
-		trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
+		trace_dlm_ast(rsb->res_ls->ls_global_id, lkb->lkb_id,
+			      cb->sb_flags, cb->sb_status, rsb->res_name,
+			      rsb->res_length);
 	}
 
 	ret = copy_result_to_user(lkb->lkb_ua,
diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
index c1a146f9fc91..af160082c9e3 100644
--- a/include/trace/events/dlm.h
+++ b/include/trace/events/dlm.h
@@ -189,29 +189,25 @@ TRACE_EVENT(dlm_lock_end,
 
 TRACE_EVENT(dlm_bast,
 
-	TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, int mode),
+	TP_PROTO(__u32 ls_id, __u32 lkb_id, int mode,
+		 const char *res_name, size_t res_length),
 
-	TP_ARGS(ls, lkb, mode),
+	TP_ARGS(ls_id, lkb_id, mode, res_name, res_length),
 
 	TP_STRUCT__entry(
 		__field(__u32, ls_id)
 		__field(__u32, lkb_id)
 		__field(int, mode)
-		__dynamic_array(unsigned char, res_name,
-				lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+		__dynamic_array(unsigned char, res_name, res_length)
 	),
 
 	TP_fast_assign(
-		struct dlm_rsb *r;
-
-		__entry->ls_id = ls->ls_global_id;
-		__entry->lkb_id = lkb->lkb_id;
+		__entry->ls_id = ls_id;
+		__entry->lkb_id = lkb_id;
 		__entry->mode = mode;
 
-		r = lkb->lkb_resource;
-		if (r)
-			memcpy(__get_dynamic_array(res_name), r->res_name,
-			       __get_dynamic_array_len(res_name));
+		memcpy(__get_dynamic_array(res_name), res_name,
+		       __get_dynamic_array_len(res_name));
 	),
 
 	TP_printk("ls_id=%u lkb_id=%x mode=%s res_name=%s",
@@ -224,31 +220,27 @@ TRACE_EVENT(dlm_bast,
 
 TRACE_EVENT(dlm_ast,
 
-	TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb),
+	TP_PROTO(__u32 ls_id, __u32 lkb_id, __u8 sb_flags, int sb_status,
+		 const char *res_name, size_t res_length),
 
-	TP_ARGS(ls, lkb),
+	TP_ARGS(ls_id, lkb_id, sb_flags, sb_status, res_name, res_length),
 
 	TP_STRUCT__entry(
 		__field(__u32, ls_id)
 		__field(__u32, lkb_id)
-		__field(u8, sb_flags)
+		__field(__u8, sb_flags)
 		__field(int, sb_status)
-		__dynamic_array(unsigned char, res_name,
-				lkb->lkb_resource ? lkb->lkb_resource->res_length : 0)
+		__dynamic_array(unsigned char, res_name, res_length)
 	),
 
 	TP_fast_assign(
-		struct dlm_rsb *r;
-
-		__entry->ls_id = ls->ls_global_id;
-		__entry->lkb_id = lkb->lkb_id;
-		__entry->sb_flags = lkb->lkb_lksb->sb_flags;
-		__entry->sb_status = lkb->lkb_lksb->sb_status;
+		__entry->ls_id = ls_id;
+		__entry->lkb_id = lkb_id;
+		__entry->sb_flags = sb_flags;
+		__entry->sb_status = sb_status;
 
-		r = lkb->lkb_resource;
-		if (r)
-			memcpy(__get_dynamic_array(res_name), r->res_name,
-			       __get_dynamic_array_len(res_name));
+		memcpy(__get_dynamic_array(res_name), res_name,
+		       __get_dynamic_array_len(res_name));
 	),
 
 	TP_printk("ls_id=%u lkb_id=%x sb_flags=%s sb_status=%d res_name=%s",
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v6.9-rc1 06/10] dlm: remove callback queue debugfs functionality
  2024-03-28 15:48 [PATCH v6.9-rc1 01/10] dlm: fix user space lock decision to copy lvb Alexander Aring
                   ` (3 preceding siblings ...)
  2024-03-28 15:48 ` [PATCH v6.9-rc1 05/10] dlm: remove lkb from ast bast tracepoints Alexander Aring
@ 2024-03-28 15:48 ` Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 07/10] dlm: move lkb debug information out of callback Alexander Aring
                   ` (3 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Alexander Aring @ 2024-03-28 15:48 UTC (permalink / raw)
  To: teigland; +Cc: gfs2

This patch removes the whole functionality to dump the pending lkb
callbacks. We are preparing to remove any relation from the lkb and its
callback because we cannot hold any lkb reference when the callback
occurs. As a result we will drop the per lkb callback queue.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/debug_fs.c | 96 -----------------------------------------------
 1 file changed, 96 deletions(-)

diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 4fa11d9ddbb6..289d959c7700 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -366,52 +366,6 @@ static void print_format4(struct dlm_rsb *r, struct seq_file *s)
 	unlock_rsb(r);
 }
 
-static void print_format5_lock(struct seq_file *s, struct dlm_lkb *lkb)
-{
-	struct dlm_callback *cb;
-
-	/* lkb_id lkb_flags mode flags sb_status sb_flags */
-
-	spin_lock(&lkb->lkb_cb_lock);
-	list_for_each_entry(cb, &lkb->lkb_callbacks, list) {
-		seq_printf(s, "%x %x %d %x %d %x\n",
-			   lkb->lkb_id,
-			   dlm_iflags_val(lkb),
-			   cb->mode,
-			   cb->flags,
-			   cb->sb_status,
-			   cb->sb_flags);
-	}
-	spin_unlock(&lkb->lkb_cb_lock);
-}
-
-static void print_format5(struct dlm_rsb *r, struct seq_file *s)
-{
-	struct dlm_lkb *lkb;
-
-	lock_rsb(r);
-
-	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
-		print_format5_lock(s, lkb);
-		if (seq_has_overflowed(s))
-			goto out;
-	}
-
-	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
-		print_format5_lock(s, lkb);
-		if (seq_has_overflowed(s))
-			goto out;
-	}
-
-	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) {
-		print_format5_lock(s, lkb);
-		if (seq_has_overflowed(s))
-			goto out;
-	}
- out:
-	unlock_rsb(r);
-}
-
 struct rsbtbl_iter {
 	struct dlm_rsb *rsb;
 	unsigned bucket;
@@ -455,13 +409,6 @@ static int table_seq_show(struct seq_file *seq, void *iter_ptr)
 		}
 		print_format4(ri->rsb, seq);
 		break;
-	case 5:
-		if (ri->header) {
-			seq_puts(seq, "lkb_id lkb_flags mode flags sb_status sb_flags\n");
-			ri->header = 0;
-		}
-		print_format5(ri->rsb, seq);
-		break;
 	}
 
 	return 0;
@@ -471,7 +418,6 @@ static const struct seq_operations format1_seq_ops;
 static const struct seq_operations format2_seq_ops;
 static const struct seq_operations format3_seq_ops;
 static const struct seq_operations format4_seq_ops;
-static const struct seq_operations format5_seq_ops;
 
 static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 {
@@ -503,8 +449,6 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 		ri->format = 3;
 	if (seq->op == &format4_seq_ops)
 		ri->format = 4;
-	if (seq->op == &format5_seq_ops)
-		ri->format = 5;
 
 	tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
 
@@ -659,18 +603,10 @@ static const struct seq_operations format4_seq_ops = {
 	.show  = table_seq_show,
 };
 
-static const struct seq_operations format5_seq_ops = {
-	.start = table_seq_start,
-	.next  = table_seq_next,
-	.stop  = table_seq_stop,
-	.show  = table_seq_show,
-};
-
 static const struct file_operations format1_fops;
 static const struct file_operations format2_fops;
 static const struct file_operations format3_fops;
 static const struct file_operations format4_fops;
-static const struct file_operations format5_fops;
 
 static int table_open1(struct inode *inode, struct file *file)
 {
@@ -757,20 +693,6 @@ static int table_open4(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static int table_open5(struct inode *inode, struct file *file)
-{
-	struct seq_file *seq;
-	int ret;
-
-	ret = seq_open(file, &format5_seq_ops);
-	if (ret)
-		return ret;
-
-	seq = file->private_data;
-	seq->private = inode->i_private; /* the dlm_ls */
-	return 0;
-}
-
 static const struct file_operations format1_fops = {
 	.owner   = THIS_MODULE,
 	.open    = table_open1,
@@ -804,14 +726,6 @@ static const struct file_operations format4_fops = {
 	.release = seq_release
 };
 
-static const struct file_operations format5_fops = {
-	.owner   = THIS_MODULE,
-	.open    = table_open5,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = seq_release
-};
-
 /*
  * dump lkb's on the ls_waiters list
  */
@@ -1021,16 +935,6 @@ void dlm_create_debug_file(struct dlm_ls *ls)
 							  dlm_root,
 							  ls,
 							  &waiters_fops);
-
-	/* format 5 */
-
-	snprintf(name, sizeof(name), "%s_queued_asts", ls->ls_name);
-
-	ls->ls_debug_queued_asts_dentry = debugfs_create_file(name,
-							      0644,
-							      dlm_root,
-							      ls,
-							      &format5_fops);
 }
 
 void __init dlm_register_debugfs(void)
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v6.9-rc1 07/10] dlm: move lkb debug information out of callback
  2024-03-28 15:48 [PATCH v6.9-rc1 01/10] dlm: fix user space lock decision to copy lvb Alexander Aring
                   ` (4 preceding siblings ...)
  2024-03-28 15:48 ` [PATCH v6.9-rc1 06/10] dlm: remove callback queue debugfs functionality Alexander Aring
@ 2024-03-28 15:48 ` Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 08/10] dlm: combine switch case fail and default statements Alexander Aring
                   ` (2 subsequent siblings)
  8 siblings, 0 replies; 10+ messages in thread
From: Alexander Aring @ 2024-03-28 15:48 UTC (permalink / raw)
  To: teigland; +Cc: gfs2

This patch moves the per-lkb callback debug information to the point
where we queue the callback. As we need to remove any lkb reference in
the callback handling, this functionality will be moved to an earlier
point of execution. It is so far only used for debugging and the exact
timing doesn't matter.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/ast.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index dd7cca3c1472..cadbcbe0786b 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -105,8 +105,13 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 
 	list_add_tail(&cb->list, &lkb->lkb_callbacks);
 
-	if (flags & DLM_CB_CAST)
+	if (flags & DLM_CB_BAST) {
+		lkb->lkb_last_bast_time = ktime_get();
+		lkb->lkb_last_bast_mode = cb->mode;
+	} else if (flags & DLM_CB_CAST) {
 		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);
+		lkb->lkb_last_cast_time = ktime_get();
+	}
 
 	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb);
 
@@ -194,8 +199,6 @@ void dlm_callback_work(struct work_struct *work)
 			trace_dlm_bast(ls->ls_global_id, lkb->lkb_id,
 				       cb->mode, rsb->res_name,
 				       rsb->res_length);
-			lkb->lkb_last_bast_time = ktime_get();
-			lkb->lkb_last_bast_mode = cb->mode;
 			bastfn(lkb->lkb_astparam, cb->mode);
 		} else if (cb->flags & DLM_CB_CAST) {
 			lkb->lkb_lksb->sb_status = cb->sb_status;
@@ -203,7 +206,6 @@ void dlm_callback_work(struct work_struct *work)
 			trace_dlm_ast(ls->ls_global_id, lkb->lkb_id,
 				      cb->sb_flags, cb->sb_status,
 				      rsb->res_name, rsb->res_length);
-			lkb->lkb_last_cast_time = ktime_get();
 			castfn(lkb->lkb_astparam);
 		}
 
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v6.9-rc1 08/10] dlm: combine switch case fail and default statements
  2024-03-28 15:48 [PATCH v6.9-rc1 01/10] dlm: fix user space lock decision to copy lvb Alexander Aring
                   ` (5 preceding siblings ...)
  2024-03-28 15:48 ` [PATCH v6.9-rc1 07/10] dlm: move lkb debug information out of callback Alexander Aring
@ 2024-03-28 15:48 ` Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 09/10] dlm: fix race between final callback and remove Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 10/10] dlm: remove callback reference counting Alexander Aring
  8 siblings, 0 replies; 10+ messages in thread
From: Alexander Aring @ 2024-03-28 15:48 UTC (permalink / raw)
  To: teigland; +Cc: gfs2

This patch combines the failure and default cases for enqueuing and
dequeuing a callback on the lkb callback queue; they should end the same
way in both cases, as they should never happen.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/ast.c  | 5 ++---
 fs/dlm/user.c | 9 ++++-----
 2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index cadbcbe0786b..5ea0b62f276b 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -160,11 +160,10 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
 		}
 		spin_unlock(&ls->ls_cb_lock);
 		break;
-	case DLM_ENQUEUE_CALLBACK_FAILURE:
-		WARN_ON_ONCE(1);
-		break;
 	case DLM_ENQUEUE_CALLBACK_SUCCESS:
 		break;
+	case DLM_ENQUEUE_CALLBACK_FAILURE:
+		fallthrough;
 	default:
 		WARN_ON_ONCE(1);
 		break;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 6f99bbeeac9b..fa99b6074e5c 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -231,10 +231,6 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
 
 	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
 	switch (rv) {
-	case DLM_ENQUEUE_CALLBACK_FAILURE:
-		spin_unlock(&proc->asts_spin);
-		WARN_ON_ONCE(1);
-		goto out;
 	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
 		kref_get(&lkb->lkb_ref);
 		list_add_tail(&lkb->lkb_cb_list, &proc->asts);
@@ -242,9 +238,12 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
 		break;
 	case DLM_ENQUEUE_CALLBACK_SUCCESS:
 		break;
+	case DLM_ENQUEUE_CALLBACK_FAILURE:
+		fallthrough;
 	default:
+		spin_unlock(&proc->asts_spin);
 		WARN_ON_ONCE(1);
-		break;
+		goto out;
 	}
 	spin_unlock(&proc->asts_spin);
 
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v6.9-rc1 09/10] dlm: fix race between final callback and remove
  2024-03-28 15:48 [PATCH v6.9-rc1 01/10] dlm: fix user space lock decision to copy lvb Alexander Aring
                   ` (6 preceding siblings ...)
  2024-03-28 15:48 ` [PATCH v6.9-rc1 08/10] dlm: combine switch case fail and default statements Alexander Aring
@ 2024-03-28 15:48 ` Alexander Aring
  2024-03-28 15:48 ` [PATCH v6.9-rc1 10/10] dlm: remove callback reference counting Alexander Aring
  8 siblings, 0 replies; 10+ messages in thread
From: Alexander Aring @ 2024-03-28 15:48 UTC (permalink / raw)
  To: teigland; +Cc: gfs2

This patch fixes the following issue (described by David Teigland):

node 1 is dir
node 2 is master
node 3 is other

1->2: unlock
2: put final lkb, rsb moved to toss
2->1: unlock_reply
1: queue lkb callback with EUNLOCK  (*)
2->1: remove
1: receive_remove ignored (rsb on keep because of queued lkb callback)
1: complete lkb callback, put_lkb, move rsb to toss
3->1: lookup
1->3: lookup_reply master=2
3->2: request
2->3: request_reply EBADR

(*) When queuing the unlock callback, perhaps we could save the callback
information in another struct, and not keep the lkb/rsb referenced for
the callback.  Then the lkb would be freed immediately in unlock_reply,
and the rsb would be moved to the toss list in the unlock_reply.

This theory assumes some large delay in delivering the unlock callback,
because the remove message from 2->1 only happens after some seconds.

When node 3 does the request and 2 replies wrongly with EBADR because
the dir node 1 wrongly replies the master is 2 but it isn't anymore. We
have a deadlock situation that is not resolvable anymore. The problem
became a bigger issue when we reverted commit 96006ea6d4ee ("dlm: fix
missing dir remove") that was probably a "bandaid" fix for this issue
without knowing the real source of it by resending the remove message.

The root of this issue is an unexpected refcount situation when receiving
a remove message. With default configuration parameters this is, however,
a very unlikely race, but it could occur in certain situations. A
specific case would be a final unlock request as described above while the
callback workqueue still holds a reference to the lkb, so the rsb is not
on the toss list as required and expected in order to handle the remove
message.

This patch is fixing this issue by not keeping the lkb reference when
handling the callback anymore. User space callback handling is also
affected because there is an unknown time until the user space
program does a device_read().

This patch slows down the callback handling, but future patches will mark
the callback workqueue handling as deprecated and call the DLM callbacks
directly, which should not have this problem for kernel locks. For user
space locks the locking behaviour is known to be slow. Before looking
at it again to maybe improve the non-deprecated behaviour, we are
fixing this potential issue.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/ast.c          | 166 ++++++++++++++++--------------------------
 fs/dlm/ast.h          |  10 +--
 fs/dlm/dlm_internal.h |  60 +++++++++------
 fs/dlm/lock.c         |  20 ++---
 fs/dlm/user.c         |  86 ++++++----------------
 5 files changed, 129 insertions(+), 213 deletions(-)

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 5ea0b62f276b..e9da812352b4 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -37,12 +37,32 @@ void dlm_callback_set_last_ptr(struct dlm_callback **from,
 	*from = to;
 }
 
-int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
-			     int status, uint32_t sbflags)
+static void dlm_callback_work(struct work_struct *work)
 {
-	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+	struct dlm_callback *cb = container_of(work, struct dlm_callback, work);
+
+	if (cb->flags & DLM_CB_BAST) {
+		trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
+			       cb->res_length);
+		cb->bastfn(cb->astparam, cb->mode);
+	} else if (cb->flags & DLM_CB_CAST) {
+		trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status,
+			      cb->sb_flags, cb->res_name, cb->res_length);
+		cb->lkb_lksb->sb_status = cb->sb_status;
+		cb->lkb_lksb->sb_flags = cb->sb_flags;
+		cb->astfn(cb->astparam);
+	}
+
+	kref_put(&cb->ref, dlm_release_callback);
+}
+
+int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+			   int status, uint32_t sbflags,
+			   struct dlm_callback **cb)
+{
+	struct dlm_rsb *rsb = lkb->lkb_resource;
 	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
-	struct dlm_callback *cb;
+	struct dlm_ls *ls = rsb->res_ls;
 	int copy_lvb = 0;
 	int prev_mode;
 
@@ -88,57 +108,46 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 		}
 	}
 
-	cb = dlm_allocate_cb();
-	if (!cb) {
+	*cb = dlm_allocate_cb();
+	if (!*cb) {
 		rv = DLM_ENQUEUE_CALLBACK_FAILURE;
 		goto out;
 	}
 
-	cb->flags = flags;
-	cb->mode = mode;
-	cb->sb_status = status;
-	cb->sb_flags = (sbflags & 0x000000FF);
-	cb->copy_lvb = copy_lvb;
-	kref_init(&cb->ref);
-	if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags))
-		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
+	/* for tracing */
+	(*cb)->lkb_id = lkb->lkb_id;
+	(*cb)->ls_id = ls->ls_global_id;
+	memcpy((*cb)->res_name, rsb->res_name, rsb->res_length);
+	(*cb)->res_length = rsb->res_length;
 
-	list_add_tail(&cb->list, &lkb->lkb_callbacks);
+	(*cb)->flags = flags;
+	(*cb)->mode = mode;
+	(*cb)->sb_status = status;
+	(*cb)->sb_flags = (sbflags & 0x000000FF);
+	(*cb)->copy_lvb = copy_lvb;
+	(*cb)->lkb_lksb = lkb->lkb_lksb;
+	kref_init(&(*cb)->ref);
 
 	if (flags & DLM_CB_BAST) {
 		lkb->lkb_last_bast_time = ktime_get();
-		lkb->lkb_last_bast_mode = cb->mode;
+		lkb->lkb_last_bast_mode = mode;
 	} else if (flags & DLM_CB_CAST) {
-		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);
+		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, *cb);
 		lkb->lkb_last_cast_time = ktime_get();
 	}
 
-	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb);
+	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, *cb);
+	rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
 
- out:
+out:
 	return rv;
 }
 
-int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb)
-{
-	/* oldest undelivered cb is callbacks first entry */
-	*cb = list_first_entry_or_null(&lkb->lkb_callbacks,
-				       struct dlm_callback, list);
-	if (!*cb)
-		return DLM_DEQUEUE_CALLBACK_EMPTY;
-
-	/* remove it from callbacks so shift others down */
-	list_del(&(*cb)->list);
-	if (list_empty(&lkb->lkb_callbacks))
-		return DLM_DEQUEUE_CALLBACK_LAST;
-
-	return DLM_DEQUEUE_CALLBACK_SUCCESS;
-}
-
 void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
-		uint32_t sbflags)
+		  uint32_t sbflags)
 {
 	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+	struct dlm_callback *cb;
 	int rv;
 
 	if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
@@ -146,18 +155,20 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
 		return;
 	}
 
-	spin_lock(&lkb->lkb_cb_lock);
-	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
+	rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags,
+				    &cb);
 	switch (rv) {
 	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
-		kref_get(&lkb->lkb_ref);
+		cb->astfn = lkb->lkb_astfn;
+		cb->bastfn = lkb->lkb_bastfn;
+		cb->astparam = lkb->lkb_astparam;
+		INIT_WORK(&cb->work, dlm_callback_work);
 
 		spin_lock(&ls->ls_cb_lock);
-		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
-			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
-		} else {
-			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
-		}
+		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags))
+			list_add(&cb->list, &ls->ls_cb_delay);
+		else
+			queue_work(ls->ls_callback_wq, &cb->work);
 		spin_unlock(&ls->ls_cb_lock);
 		break;
 	case DLM_ENQUEUE_CALLBACK_SUCCESS:
@@ -168,67 +179,12 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
 		WARN_ON_ONCE(1);
 		break;
 	}
-	spin_unlock(&lkb->lkb_cb_lock);
-}
-
-void dlm_callback_work(struct work_struct *work)
-{
-	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
-	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
-	struct dlm_rsb *rsb = lkb->lkb_resource;
-	void (*castfn) (void *astparam);
-	void (*bastfn) (void *astparam, int mode);
-	struct dlm_callback *cb;
-	int rv;
-
-	spin_lock(&lkb->lkb_cb_lock);
-	rv = dlm_dequeue_lkb_callback(lkb, &cb);
-	if (WARN_ON_ONCE(rv == DLM_DEQUEUE_CALLBACK_EMPTY)) {
-		clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
-		spin_unlock(&lkb->lkb_cb_lock);
-		goto out;
-	}
-	spin_unlock(&lkb->lkb_cb_lock);
-
-	for (;;) {
-		castfn = lkb->lkb_astfn;
-		bastfn = lkb->lkb_bastfn;
-
-		if (cb->flags & DLM_CB_BAST) {
-			trace_dlm_bast(ls->ls_global_id, lkb->lkb_id,
-				       cb->mode, rsb->res_name,
-				       rsb->res_length);
-			bastfn(lkb->lkb_astparam, cb->mode);
-		} else if (cb->flags & DLM_CB_CAST) {
-			lkb->lkb_lksb->sb_status = cb->sb_status;
-			lkb->lkb_lksb->sb_flags = cb->sb_flags;
-			trace_dlm_ast(ls->ls_global_id, lkb->lkb_id,
-				      cb->sb_flags, cb->sb_status,
-				      rsb->res_name, rsb->res_length);
-			castfn(lkb->lkb_astparam);
-		}
-
-		kref_put(&cb->ref, dlm_release_callback);
-
-		spin_lock(&lkb->lkb_cb_lock);
-		rv = dlm_dequeue_lkb_callback(lkb, &cb);
-		if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) {
-			clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
-			spin_unlock(&lkb->lkb_cb_lock);
-			break;
-		}
-		spin_unlock(&lkb->lkb_cb_lock);
-	}
-
-out:
-	/* undo kref_get from dlm_add_callback, may cause lkb to be freed */
-	dlm_put_lkb(lkb);
 }
 
 int dlm_callback_start(struct dlm_ls *ls)
 {
-	ls->ls_callback_wq = alloc_workqueue("dlm_callback",
-					     WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
+	ls->ls_callback_wq = alloc_ordered_workqueue("dlm_callback",
+						     WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!ls->ls_callback_wq) {
 		log_print("can't start dlm_callback workqueue");
 		return -ENOMEM;
@@ -257,7 +213,7 @@ void dlm_callback_suspend(struct dlm_ls *ls)
 
 void dlm_callback_resume(struct dlm_ls *ls)
 {
-	struct dlm_lkb *lkb, *safe;
+	struct dlm_callback *cb, *safe;
 	int count = 0, sum = 0;
 	bool empty;
 
@@ -266,9 +222,9 @@ void dlm_callback_resume(struct dlm_ls *ls)
 
 more:
 	spin_lock(&ls->ls_cb_lock);
-	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
-		list_del_init(&lkb->lkb_cb_list);
-		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
+	list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) {
+		list_del(&cb->list);
+		queue_work(ls->ls_callback_wq, &cb->work);
 		count++;
 		if (count == MAX_CB_QUEUE)
 			break;
diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h
index ce007892dc2d..9bd12409e1ee 100644
--- a/fs/dlm/ast.h
+++ b/fs/dlm/ast.h
@@ -14,19 +14,15 @@
 #define DLM_ENQUEUE_CALLBACK_NEED_SCHED	1
 #define DLM_ENQUEUE_CALLBACK_SUCCESS	0
 #define DLM_ENQUEUE_CALLBACK_FAILURE	-1
-int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
-			     int status, uint32_t sbflags);
-#define DLM_DEQUEUE_CALLBACK_EMPTY	2
-#define DLM_DEQUEUE_CALLBACK_LAST	1
-#define DLM_DEQUEUE_CALLBACK_SUCCESS	0
-int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb);
+int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+			   int status, uint32_t sbflags,
+			   struct dlm_callback **cb);
 void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
                 uint32_t sbflags);
 void dlm_callback_set_last_ptr(struct dlm_callback **from,
 			       struct dlm_callback *to);
 
 void dlm_release_callback(struct kref *ref);
-void dlm_callback_work(struct work_struct *work);
 int dlm_callback_start(struct dlm_ls *ls);
 void dlm_callback_stop(struct dlm_ls *ls);
 void dlm_callback_suspend(struct dlm_ls *ls);
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index a9137c90f348..cda1526fd15b 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -16,6 +16,7 @@
  * This is the main header file to be included in each DLM source file.
  */
 
+#include <uapi/linux/dlm_device.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/types.h>
@@ -204,8 +205,7 @@ struct dlm_args {
 #define DLM_IFL_OVERLAP_CANCEL_BIT 20
 #define DLM_IFL_ENDOFLIFE_BIT	21
 #define DLM_IFL_DEADLOCK_CANCEL_BIT 24
-#define DLM_IFL_CB_PENDING_BIT	25
-#define __DLM_IFL_MAX_BIT	DLM_IFL_CB_PENDING_BIT
+#define __DLM_IFL_MAX_BIT	DLM_IFL_DEADLOCK_CANCEL_BIT
 
 /* lkb_dflags */
 
@@ -217,12 +217,45 @@ struct dlm_args {
 #define DLM_CB_CAST		0x00000001
 #define DLM_CB_BAST		0x00000002
 
+/* much of this is just saving user space pointers associated with the
+ * lock that we pass back to the user lib with an ast
+ */
+
+struct dlm_user_args {
+	struct dlm_user_proc	*proc; /* each process that opens the lockspace
+					* device has private data
+					* (dlm_user_proc) on the struct file,
+					* the process's locks point back to it
+					*/
+	struct dlm_lksb		lksb;
+	struct dlm_lksb __user	*user_lksb;
+	void __user		*castparam;
+	void __user		*castaddr;
+	void __user		*bastparam;
+	void __user		*bastaddr;
+	uint64_t		xid;
+};
+
 struct dlm_callback {
 	uint32_t		flags;		/* DLM_CBF_ */
 	int			sb_status;	/* copy to lksb status */
 	uint8_t			sb_flags;	/* copy to lksb flags */
 	int8_t			mode; /* rq mode of bast, gr mode of cast */
-	int			copy_lvb;
+	bool			copy_lvb;
+	struct dlm_lksb		*lkb_lksb;
+	unsigned char		lvbptr[DLM_USER_LVB_LEN];
+
+	union {
+		void			*astparam;	/* caller's ast arg */
+		struct dlm_user_args	ua;
+	};
+	struct work_struct	work;
+	void			(*bastfn)(void *astparam, int mode);
+	void			(*astfn)(void *astparam);
+	char			res_name[DLM_RESNAME_MAXLEN];
+	size_t			res_length;
+	uint32_t		ls_id;
+	uint32_t		lkb_id;
 
 	struct list_head	list;
 	struct kref		ref;
@@ -256,10 +289,6 @@ struct dlm_lkb {
 	struct list_head	lkb_ownqueue;	/* list of locks for a process */
 	ktime_t			lkb_timestamp;
 
-	spinlock_t		lkb_cb_lock;
-	struct work_struct	lkb_cb_work;
-	struct list_head	lkb_cb_list; /* for ls_cb_delay or proc->asts */
-	struct list_head	lkb_callbacks;
 	struct dlm_callback	*lkb_last_cast;
 	struct dlm_callback	*lkb_last_cb;
 	int			lkb_last_bast_mode;
@@ -688,23 +717,6 @@ struct dlm_ls {
 #define LSFL_CB_DELAY		9
 #define LSFL_NODIR		10
 
-/* much of this is just saving user space pointers associated with the
-   lock that we pass back to the user lib with an ast */
-
-struct dlm_user_args {
-	struct dlm_user_proc	*proc; /* each process that opens the lockspace
-					  device has private data
-					  (dlm_user_proc) on the struct file,
-					  the process's locks point back to it*/
-	struct dlm_lksb		lksb;
-	struct dlm_lksb __user	*user_lksb;
-	void __user		*castparam;
-	void __user		*castaddr;
-	void __user		*bastparam;
-	void __user		*bastaddr;
-	uint64_t		xid;
-};
-
 #define DLM_PROC_FLAGS_CLOSING 1
 #define DLM_PROC_FLAGS_COMPAT  2
 
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index a39c2c49bff8..1c0d01dc7eca 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1203,10 +1203,6 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
 	kref_init(&lkb->lkb_ref);
 	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
 	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
-	INIT_LIST_HEAD(&lkb->lkb_cb_list);
-	INIT_LIST_HEAD(&lkb->lkb_callbacks);
-	spin_lock_init(&lkb->lkb_cb_lock);
-	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
 
 	idr_preload(GFP_NOFS);
 	spin_lock(&ls->ls_lkbidr_spin);
@@ -6003,6 +5999,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
 
 void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
 {
+	struct dlm_callback *cb, *cb_safe;
 	struct dlm_lkb *lkb, *safe;
 
 	dlm_lock_recovery(ls);
@@ -6032,10 +6029,9 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
 		dlm_put_lkb(lkb);
 	}
 
-	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
-		dlm_purge_lkb_callbacks(lkb);
-		list_del_init(&lkb->lkb_cb_list);
-		dlm_put_lkb(lkb);
+	list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
+		list_del(&cb->list);
+		kref_put(&cb->ref, dlm_release_callback);
 	}
 
 	spin_unlock(&ls->ls_clear_proc_locks);
@@ -6044,6 +6040,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
 
 static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
 {
+	struct dlm_callback *cb, *cb_safe;
 	struct dlm_lkb *lkb, *safe;
 
 	while (1) {
@@ -6073,10 +6070,9 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
 	spin_unlock(&proc->locks_spin);
 
 	spin_lock(&proc->asts_spin);
-	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
-		dlm_purge_lkb_callbacks(lkb);
-		list_del_init(&lkb->lkb_cb_list);
-		dlm_put_lkb(lkb);
+	list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
+		list_del(&cb->list);
+		kref_put(&cb->ref, dlm_release_callback);
 	}
 	spin_unlock(&proc->asts_spin);
 }
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index fa99b6074e5c..334a6d64d413 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -21,6 +21,7 @@
 #include "dlm_internal.h"
 #include "lockspace.h"
 #include "lock.h"
+#include "lvb_table.h"
 #include "user.h"
 #include "ast.h"
 #include "config.h"
@@ -144,24 +145,6 @@ static void compat_output(struct dlm_lock_result *res,
 }
 #endif
 
-/* should held proc->asts_spin lock */
-void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb)
-{
-	struct dlm_callback *cb, *safe;
-
-	list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) {
-		list_del(&cb->list);
-		kref_put(&cb->ref, dlm_release_callback);
-	}
-
-	clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
-
-	/* invalidate */
-	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
-	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
-	lkb->lkb_last_bast_mode = -1;
-}
-
 /* Figure out if this lock is at the end of its life and no longer
    available for the application to use.  The lkb still exists until
    the final ast is read.  A lock becomes EOL in three situations:
@@ -198,6 +181,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
 	struct dlm_ls *ls;
 	struct dlm_user_args *ua;
 	struct dlm_user_proc *proc;
+	struct dlm_callback *cb;
 	int rv;
 
 	if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
@@ -229,11 +213,18 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
 
 	spin_lock(&proc->asts_spin);
 
-	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
+	rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags, &cb);
 	switch (rv) {
 	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
-		kref_get(&lkb->lkb_ref);
-		list_add_tail(&lkb->lkb_cb_list, &proc->asts);
+		cb->ua = *ua;
+		cb->lkb_lksb = &cb->ua.lksb;
+		if (cb->copy_lvb) {
+			memcpy(cb->lvbptr, ua->lksb.sb_lvbptr,
+			       DLM_USER_LVB_LEN);
+			cb->lkb_lksb->sb_lvbptr = cb->lvbptr;
+		}
+
+		list_add_tail(&cb->list, &proc->asts);
 		wake_up_interruptible(&proc->wait);
 		break;
 	case DLM_ENQUEUE_CALLBACK_SUCCESS:
@@ -801,10 +792,8 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 			   loff_t *ppos)
 {
 	struct dlm_user_proc *proc = file->private_data;
-	struct dlm_lkb *lkb;
 	DECLARE_WAITQUEUE(wait, current);
 	struct dlm_callback *cb;
-	struct dlm_rsb *rsb;
 	int rv, ret;
 
 	if (count == sizeof(struct dlm_device_version)) {
@@ -824,8 +813,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 #endif
 		return -EINVAL;
 
- try_another:
-
 	/* do we really need this? can a read happen after a close? */
 	if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
 		return -EINVAL;
@@ -860,55 +847,24 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 	   without removing lkb_cb_list; so empty lkb_cb_list is always
 	   consistent with empty lkb_callbacks */
 
-	lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list);
-
-	rv = dlm_dequeue_lkb_callback(lkb, &cb);
-	switch (rv) {
-	case DLM_DEQUEUE_CALLBACK_EMPTY:
-		/* this shouldn't happen; lkb should have been removed from
-		 * list when last item was dequeued
-		 */
-		log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
-		list_del_init(&lkb->lkb_cb_list);
-		spin_unlock(&proc->asts_spin);
-		/* removes ref for proc->asts, may cause lkb to be freed */
-		dlm_put_lkb(lkb);
-		WARN_ON_ONCE(1);
-		goto try_another;
-	case DLM_DEQUEUE_CALLBACK_LAST:
-		list_del_init(&lkb->lkb_cb_list);
-		clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
-		break;
-	case DLM_DEQUEUE_CALLBACK_SUCCESS:
-		break;
-	default:
-		WARN_ON_ONCE(1);
-		break;
-	}
+	cb = list_first_entry(&proc->asts, struct dlm_callback, list);
+	list_del(&cb->list);
 	spin_unlock(&proc->asts_spin);
 
-	rsb = lkb->lkb_resource;
 	if (cb->flags & DLM_CB_BAST) {
-		trace_dlm_bast(rsb->res_ls->ls_global_id, lkb->lkb_id,
-			       cb->mode, rsb->res_name, rsb->res_length);
+		trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
+			       cb->res_length);
 	} else if (cb->flags & DLM_CB_CAST) {
-		lkb->lkb_lksb->sb_status = cb->sb_status;
-		lkb->lkb_lksb->sb_flags = cb->sb_flags;
-		trace_dlm_ast(rsb->res_ls->ls_global_id, lkb->lkb_id,
-			      cb->sb_flags, cb->sb_status, rsb->res_name,
-			      rsb->res_length);
+		cb->lkb_lksb->sb_status = cb->sb_status;
+		cb->lkb_lksb->sb_flags = cb->sb_flags;
+		trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status,
+			      cb->sb_flags, cb->res_name, cb->res_length);
 	}
 
-	ret = copy_result_to_user(lkb->lkb_ua,
+	ret = copy_result_to_user(&cb->ua,
 				  test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
 				  cb->flags, cb->mode, cb->copy_lvb, buf, count);
-
 	kref_put(&cb->ref, dlm_release_callback);
-
-	/* removes ref for proc->asts, may cause lkb to be freed */
-	if (rv == DLM_DEQUEUE_CALLBACK_LAST)
-		dlm_put_lkb(lkb);
-
 	return ret;
 }
 
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v6.9-rc1 10/10] dlm: remove callback reference counting
  2024-03-28 15:48 [PATCH v6.9-rc1 01/10] dlm: fix user space lock decision to copy lvb Alexander Aring
                   ` (7 preceding siblings ...)
  2024-03-28 15:48 ` [PATCH v6.9-rc1 09/10] dlm: fix race between final callback and remove Alexander Aring
@ 2024-03-28 15:48 ` Alexander Aring
  8 siblings, 0 replies; 10+ messages in thread
From: Alexander Aring @ 2024-03-28 15:48 UTC (permalink / raw)
  To: teigland; +Cc: gfs2

Since we copy all necessary callback information from the lkb to the
per-callback structure, the amount of memory used by that structure has
increased. As each lkb structure holds a reference to the callback
structure of the last callback that occurred, this can hold more memory
than necessary. Instead, we just copy the necessary information, as was
done before commit 61bed0baa4db ("fs: dlm: use a non-static queue for
callbacks").

Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/dlm/ast.c          | 56 +++++++++++++------------------------------
 fs/dlm/ast.h          |  3 ---
 fs/dlm/debug_fs.c     |  2 +-
 fs/dlm/dlm_internal.h |  8 +++----
 fs/dlm/lock.c         |  8 ++++---
 fs/dlm/memory.c       |  4 ----
 fs/dlm/user.c         |  2 +-
 7 files changed, 28 insertions(+), 55 deletions(-)

diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index e9da812352b4..03879c94fadb 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -18,25 +18,6 @@
 #include "user.h"
 #include "ast.h"
 
-void dlm_release_callback(struct kref *ref)
-{
-	struct dlm_callback *cb = container_of(ref, struct dlm_callback, ref);
-
-	dlm_free_cb(cb);
-}
-
-void dlm_callback_set_last_ptr(struct dlm_callback **from,
-			       struct dlm_callback *to)
-{
-	if (*from)
-		kref_put(&(*from)->ref, dlm_release_callback);
-
-	if (to)
-		kref_get(&to->ref);
-
-	*from = to;
-}
-
 static void dlm_callback_work(struct work_struct *work)
 {
 	struct dlm_callback *cb = container_of(work, struct dlm_callback, work);
@@ -53,7 +34,7 @@ static void dlm_callback_work(struct work_struct *work)
 		cb->astfn(cb->astparam);
 	}
 
-	kref_put(&cb->ref, dlm_release_callback);
+	dlm_free_cb(cb);
 }
 
 int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
@@ -70,11 +51,11 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 		/* if cb is a bast, it should be skipped if the blocking mode is
 		 * compatible with the last granted mode
 		 */
-		if (lkb->lkb_last_cast) {
-			if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) {
+		if (lkb->lkb_last_cast_cb_mode != -1) {
+			if (dlm_modes_compat(mode, lkb->lkb_last_cast_cb_mode)) {
 				log_debug(ls, "skip %x bast mode %d for cast mode %d",
 					  lkb->lkb_id, mode,
-					  lkb->lkb_last_cast->mode);
+					  lkb->lkb_last_cast_cb_mode);
 				goto out;
 			}
 		}
@@ -85,8 +66,9 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 		 * is a bast for the same mode or a more restrictive mode.
 		 * (the addional > PR check is needed for PR/CW inversion)
 		 */
-		if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) {
-			prev_mode = lkb->lkb_last_cb->mode;
+		if (lkb->lkb_last_cb_mode != -1 &&
+		    lkb->lkb_last_cb_flags & DLM_CB_BAST) {
+			prev_mode = lkb->lkb_last_cb_mode;
 
 			if ((prev_mode == mode) ||
 			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
@@ -95,19 +77,25 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 				goto out;
 			}
 		}
+
+		lkb->lkb_last_bast_time = ktime_get();
+		lkb->lkb_last_bast_cb_mode = mode;
 	} else if (flags & DLM_CB_CAST) {
 		if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
-			if (lkb->lkb_last_cast)
-				prev_mode = lkb->lkb_last_cb->mode;
-			else
-				prev_mode = -1;
+			prev_mode = lkb->lkb_last_cast_cb_mode;
 
 			if (!status && lkb->lkb_lksb->sb_lvbptr &&
 			    dlm_lvb_operations[prev_mode + 1][mode + 1])
 				copy_lvb = 1;
 		}
+
+		lkb->lkb_last_cast_cb_mode = mode;
+		lkb->lkb_last_cast_time = ktime_get();
 	}
 
+	lkb->lkb_last_cb_mode = mode;
+	lkb->lkb_last_cb_flags = flags;
+
 	*cb = dlm_allocate_cb();
 	if (!*cb) {
 		rv = DLM_ENQUEUE_CALLBACK_FAILURE;
@@ -126,17 +114,7 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 	(*cb)->sb_flags = (sbflags & 0x000000FF);
 	(*cb)->copy_lvb = copy_lvb;
 	(*cb)->lkb_lksb = lkb->lkb_lksb;
-	kref_init(&(*cb)->ref);
-
-	if (flags & DLM_CB_BAST) {
-		lkb->lkb_last_bast_time = ktime_get();
-		lkb->lkb_last_bast_mode = mode;
-	} else if (flags & DLM_CB_CAST) {
-		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, *cb);
-		lkb->lkb_last_cast_time = ktime_get();
-	}
 
-	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, *cb);
 	rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
 
 out:
diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h
index 9bd12409e1ee..9093ff043bee 100644
--- a/fs/dlm/ast.h
+++ b/fs/dlm/ast.h
@@ -19,10 +19,7 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 			   struct dlm_callback **cb);
 void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
                 uint32_t sbflags);
-void dlm_callback_set_last_ptr(struct dlm_callback **from,
-			       struct dlm_callback *to);
 
-void dlm_release_callback(struct kref *ref);
 int dlm_callback_start(struct dlm_ls *ls);
 void dlm_callback_stop(struct dlm_ls *ls);
 void dlm_callback_suspend(struct dlm_ls *ls);
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 289d959c7700..19cdedd56629 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -247,7 +247,7 @@ static void print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb,
 		   lkb->lkb_status,
 		   lkb->lkb_grmode,
 		   lkb->lkb_rqmode,
-		   lkb->lkb_last_bast_mode,
+		   lkb->lkb_last_bast_cb_mode,
 		   rsb_lookup,
 		   lkb->lkb_wait_type,
 		   lkb->lkb_lvbseq,
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index cda1526fd15b..1d2ee5c2d23d 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -258,7 +258,6 @@ struct dlm_callback {
 	uint32_t		lkb_id;
 
 	struct list_head	list;
-	struct kref		ref;
 };
 
 struct dlm_lkb {
@@ -289,9 +288,10 @@ struct dlm_lkb {
 	struct list_head	lkb_ownqueue;	/* list of locks for a process */
 	ktime_t			lkb_timestamp;
 
-	struct dlm_callback	*lkb_last_cast;
-	struct dlm_callback	*lkb_last_cb;
-	int			lkb_last_bast_mode;
+	int8_t			lkb_last_cast_cb_mode;
+	int8_t			lkb_last_bast_cb_mode;
+	int8_t			lkb_last_cb_mode;
+	uint8_t			lkb_last_cb_flags;
 	ktime_t			lkb_last_cast_time;	/* for debugging */
 	ktime_t			lkb_last_bast_time;	/* for debugging */
 
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 1c0d01dc7eca..f8516b532211 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1197,7 +1197,9 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
 	if (!lkb)
 		return -ENOMEM;
 
-	lkb->lkb_last_bast_mode = -1;
+	lkb->lkb_last_bast_cb_mode = DLM_LOCK_IV;
+	lkb->lkb_last_cast_cb_mode = DLM_LOCK_IV;
+	lkb->lkb_last_cb_mode = DLM_LOCK_IV;
 	lkb->lkb_nodeid = -1;
 	lkb->lkb_grmode = DLM_LOCK_IV;
 	kref_init(&lkb->lkb_ref);
@@ -6031,7 +6033,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
 
 	list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
 		list_del(&cb->list);
-		kref_put(&cb->ref, dlm_release_callback);
+		dlm_free_cb(cb);
 	}
 
 	spin_unlock(&ls->ls_clear_proc_locks);
@@ -6072,7 +6074,7 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
 	spin_lock(&proc->asts_spin);
 	list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
 		list_del(&cb->list);
-		kref_put(&cb->ref, dlm_release_callback);
+		dlm_free_cb(cb);
 	}
 	spin_unlock(&proc->asts_spin);
 }
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 64f212a066cf..be9398ddf357 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -127,10 +127,6 @@ void dlm_free_lkb(struct dlm_lkb *lkb)
 		}
 	}
 
-	/* drop references if they are set */
-	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
-	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
-
 	kmem_cache_free(lkb_cache, lkb);
 }
 
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 334a6d64d413..b4971ba4bdd6 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -864,7 +864,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 	ret = copy_result_to_user(&cb->ua,
 				  test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
 				  cb->flags, cb->mode, cb->copy_lvb, buf, count);
-	kref_put(&cb->ref, dlm_release_callback);
+	dlm_free_cb(cb);
 	return ret;
 }
 
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2024-03-28 15:48 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-03-28 15:48 [PATCH v6.9-rc1 01/10] dlm: fix user space lock decision to copy lvb Alexander Aring
2024-03-28 15:48 ` [PATCH v6.9-rc1 02/10] fs: dlm: Simplify the allocation of slab caches in dlm_midcomms_cache_create Alexander Aring
2024-03-28 15:48 ` [PATCH v6.9-rc1 03/10] fs: dlm: Simplify the allocation of slab caches in dlm_lowcomms_msg_cache_create Alexander Aring
2024-03-28 15:48 ` [PATCH v6.9-rc1 04/10] dlm: put lkbs instead of force free Alexander Aring
2024-03-28 15:48 ` [PATCH v6.9-rc1 05/10] dlm: remove lkb from ast bast tracepoints Alexander Aring
2024-03-28 15:48 ` [PATCH v6.9-rc1 06/10] dlm: remove callback queue debugfs functionality Alexander Aring
2024-03-28 15:48 ` [PATCH v6.9-rc1 07/10] dlm: move lkb debug information out of callback Alexander Aring
2024-03-28 15:48 ` [PATCH v6.9-rc1 08/10] dlm: combine switch case fail and default statements Alexander Aring
2024-03-28 15:48 ` [PATCH v6.9-rc1 09/10] dlm: fix race between final callback and remove Alexander Aring
2024-03-28 15:48 ` [PATCH v6.9-rc1 10/10] dlm: remove callback reference counting Alexander Aring

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.