From: Andreas Gruenbacher <agruenba@redhat.com>
To: Linus Torvalds <torvalds@linux-foundation.org>,
	Alexander Viro <viro@zeniv.linux.org.uk>,
	Christoph Hellwig <hch@infradead.org>,
	"Darrick J. Wong" <djwong@kernel.org>
Cc: Jan Kara <jack@suse.cz>, Matthew Wilcox <willy@infradead.org>,
	cluster-devel@redhat.com, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org, ocfs2-devel@oss.oracle.com,
	Andreas Gruenbacher <agruenba@redhat.com>
Subject: [PATCH v7 07/19] gfs2: Clean up function may_grant
Date: Fri, 27 Aug 2021 18:49:14 +0200
Message-ID: <20210827164926.1726765-8-agruenba@redhat.com>
In-Reply-To: <20210827164926.1726765-1-agruenba@redhat.com>

Pass the first current glock holder into function may_grant and
deobfuscate the logic there.

Since do_promote now uses function find_first_holder, move that
function's definition above do_promote.
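
For illustration only, and not part of the patch: below is a minimal,
self-contained userspace sketch of the compatibility rules that the
reworked may_grant() encodes. The lock_state enum, FLAG_* constants,
and struct holder are simplified stand-ins for the GFS2 definitions,
not the kernel types.

#include <stdbool.h>
#include <stdio.h>

enum lock_state { ST_UNLOCKED, ST_SHARED, ST_DEFERRED, ST_EXCLUSIVE };

#define FLAG_NODE_SCOPE	0x1	/* holder agrees to share EX within the node */
#define FLAG_ANY	0x2	/* any locked state will do */
#define FLAG_EXACT	0x4	/* only the exact requested state will do */

struct holder {
	enum lock_state state;
	unsigned int flags;
};

/* current_gh may be NULL when the glock has no active holder. */
static bool may_grant(enum lock_state gl_state,
		      const struct holder *current_gh,
		      const struct holder *gh)
{
	if (current_gh) {
		switch (current_gh->state) {
		case ST_EXCLUSIVE:
			/* EX is only shared when both holders opt in. */
			return gh->state == ST_EXCLUSIVE &&
			       (current_gh->flags & FLAG_NODE_SCOPE) &&
			       (gh->flags & FLAG_NODE_SCOPE);
		case ST_SHARED:
		case ST_DEFERRED:
			return gh->state == current_gh->state;
		default:
			return false;
		}
	}
	if (gl_state == gh->state)
		return true;
	if (gh->flags & FLAG_EXACT)
		return false;
	if (gl_state == ST_EXCLUSIVE)
		return gh->state == ST_SHARED || gh->state == ST_DEFERRED;
	if (gh->flags & FLAG_ANY)
		return gl_state != ST_UNLOCKED;
	return false;
}

int main(void)
{
	struct holder ex_ns = { ST_EXCLUSIVE, FLAG_NODE_SCOPE };
	struct holder sh = { ST_SHARED, 0 };

	/* Prints 1, 1, 0 with the rules above. */
	printf("%d\n", may_grant(ST_EXCLUSIVE, &ex_ns, &ex_ns));
	printf("%d\n", may_grant(ST_SHARED, &sh, &sh));
	printf("%d\n", may_grant(ST_SHARED, &sh, &ex_ns));
	return 0;
}

The three calls show that a second EX holder is only granted when both
holders carry the node-scope flag, that SH alongside SH is granted, and
that EX on top of an SH holder is not.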

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
 fs/gfs2/glock.c | 120 ++++++++++++++++++++++++++++--------------------
 1 file changed, 70 insertions(+), 50 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 1f3902ecdded..545b435f55ea 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -301,46 +301,59 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 }
 
 /**
- * may_grant - check if its ok to grant a new lock
+ * may_grant - check if it's ok to grant a new lock
  * @gl: The glock
+ * @current_gh: One of the current holders of @gl
  * @gh: The lock request which we wish to grant
  *
- * Returns: true if its ok to grant the lock
+ * With our current compatibility rules, if a glock has one or more active
+ * holders (HIF_HOLDER flag set), any of those holders can be passed in as
+ * @current_gh; they are all the same as far as compatibility with the new @gh
+ * goes.
+ *
+ * Returns true if it's ok to grant the lock.
  */
 
-static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
-{
-	const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);
+static inline bool may_grant(const struct gfs2_glock *gl,
+			     const struct gfs2_holder *current_gh,
+			     const struct gfs2_holder *gh)
+{
+	if (current_gh) {
+		BUG_ON(!test_bit(HIF_HOLDER, &current_gh->gh_iflags));
+
+		switch(current_gh->gh_state) {
+		case LM_ST_EXCLUSIVE:
+			/*
+			 * Here we make a special exception to grant holders
+			 * who agree to share the EX lock with other holders
+			 * who also have the bit set. If the original holder
+			 * has the LM_FLAG_NODE_SCOPE bit set, we grant more
+			 * holders with the bit set.
+			 */
+			return gh->gh_state == LM_ST_EXCLUSIVE &&
+			       (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) &&
+			       (gh->gh_flags & LM_FLAG_NODE_SCOPE);
 
-	if (gh != gh_head) {
-		/**
-		 * Here we make a special exception to grant holders who agree
-		 * to share the EX lock with other holders who also have the
-		 * bit set. If the original holder has the LM_FLAG_NODE_SCOPE bit
-		 * is set, we grant more holders with the bit set.
-		 */
-		if (gh_head->gh_state == LM_ST_EXCLUSIVE &&
-		    (gh_head->gh_flags & LM_FLAG_NODE_SCOPE) &&
-		    gh->gh_state == LM_ST_EXCLUSIVE &&
-		    (gh->gh_flags & LM_FLAG_NODE_SCOPE))
-			return 1;
-		if ((gh->gh_state == LM_ST_EXCLUSIVE ||
-		     gh_head->gh_state == LM_ST_EXCLUSIVE))
-			return 0;
+		case LM_ST_SHARED:
+		case LM_ST_DEFERRED:
+			return gh->gh_state == current_gh->gh_state;
+
+		default:
+			return false;
+		}
 	}
+
 	if (gl->gl_state == gh->gh_state)
-		return 1;
+		return true;
 	if (gh->gh_flags & GL_EXACT)
-		return 0;
+		return false;
 	if (gl->gl_state == LM_ST_EXCLUSIVE) {
-		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
-			return 1;
-		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
-			return 1;
+		return gh->gh_state == LM_ST_SHARED ||
+		       gh->gh_state == LM_ST_DEFERRED;
 	}
-	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
-		return 1;
-	return 0;
+	if (gh->gh_flags & LM_FLAG_ANY)
+		return gl->gl_state != LM_ST_UNLOCKED;
+	return false;
 }
 
 static void gfs2_holder_wake(struct gfs2_holder *gh)
@@ -380,6 +393,24 @@ static void do_error(struct gfs2_glock *gl, const int ret)
 	}
 }
 
+/**
+ * find_first_holder - find the first "holder" gh
+ * @gl: the glock
+ */
+
+static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
+{
+	struct gfs2_holder *gh;
+
+	if (!list_empty(&gl->gl_holders)) {
+		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
+				      gh_list);
+		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+			return gh;
+	}
+	return NULL;
+}
+
 /**
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
@@ -393,14 +424,16 @@ __releases(&gl->gl_lockref.lock)
 __acquires(&gl->gl_lockref.lock)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	struct gfs2_holder *gh, *tmp;
+	struct gfs2_holder *gh, *tmp, *first_gh;
 	int ret;
 
+	first_gh = find_first_holder(gl);
+
 restart:
 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 			continue;
-		if (may_grant(gl, gh)) {
+		if (may_grant(gl, first_gh, gh)) {
 			if (gh->gh_list.prev == &gl->gl_holders &&
 			    glops->go_lock) {
 				spin_unlock(&gl->gl_lockref.lock);
@@ -722,23 +755,6 @@ __acquires(&gl->gl_lockref.lock)
 	spin_lock(&gl->gl_lockref.lock);
 }
 
-/**
- * find_first_holder - find the first "holder" gh
- * @gl: the glock
- */
-
-static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
-{
-	struct gfs2_holder *gh;
-
-	if (!list_empty(&gl->gl_holders)) {
-		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
-		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
-			return gh;
-	}
-	return NULL;
-}
-
 /**
  * run_queue - do all outstanding tasks related to a glock
  * @gl: The glock in question
@@ -1354,8 +1370,12 @@ __acquires(&gl->gl_lockref.lock)
 		GLOCK_BUG_ON(gl, true);
 
 	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
-		if (test_bit(GLF_LOCK, &gl->gl_flags))
-			try_futile = !may_grant(gl, gh);
+		if (test_bit(GLF_LOCK, &gl->gl_flags)) {
+			struct gfs2_holder *first_gh;
+
+			first_gh = find_first_holder(gl);
+			try_futile = !may_grant(gl, first_gh, gh);
+		}
 		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 			goto fail;
 	}
-- 
2.26.3

