From: xiubli@redhat.com
To: ceph-devel@vger.kernel.org, jlayton@kernel.org,
	idryomov@gmail.com, viro@zeniv.linux.org.uk
Cc: lhenriques@suse.de, mchangir@redhat.com,
	linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	Xiubo Li <xiubli@redhat.com>,
	stable@vger.kernel.org
Subject: [PATCH 1/2 v2] ceph: add ceph_lock_info support for file_lock
Date: Mon, 14 Nov 2022 13:19:00 +0800
Message-ID: <20221114051901.15371-2-xiubli@redhat.com>
In-Reply-To: <20221114051901.15371-1-xiubli@redhat.com>

From: Xiubo Li <xiubli@redhat.com>

When ceph releases a file_lock it tries to get the inode pointer from
fl->fl_file, but that memory may already have been freed by another
thread via filp_close(), because the VFS layer does not take a
reference on the file for fl->fl_file.

Switch to ceph-dedicated lock info to track the inode instead: grab a
reference on the inode and store it in fl->fl_u.ceph_fl.fl_inode when
the lock is copied onto the inode's lock list in ceph_fl_copy_lock(),
and drop that reference again in ceph_fl_release_lock().

In ceph_fl_release_lock(), skip all the operations when
fl->fl_u.ceph_fl.fl_inode is not set; in that case the file_lock is a
request lock that was never copied onto an inode's lock list.
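
To make the before/after shape of the fix concrete, here is a condensed
sketch (function names are illustrative, and the ceph_file_info
accounting and error-flag handling of the real ceph_fl_copy_lock()/
ceph_fl_release_lock() are left out):

/*
 * Before: the release callback trusted fl->fl_file, which the lock does
 * not pin, so the file (and the inode reached through it) may already
 * be gone by the time the lock is freed.
 */
static void release_lock_old(struct file_lock *fl)
{
	struct inode *inode = file_inode(fl->fl_file); /* possible use-after-free */

	/* ... dereferences ceph_inode(inode) ... */
}

/*
 * After: pin the inode with igrab() when the lock is copied onto the
 * inode's lock list, and drop that reference with iput() on release.
 * igrab() returns NULL if the inode is already being torn down.
 */
static void copy_lock_new(struct file_lock *dst, struct file_lock *src)
{
	dst->fl_u.ceph_fl.fl_inode = igrab(file_inode(dst->fl_file));
}

static void release_lock_new(struct file_lock *fl)
{
	struct inode *inode = fl->fl_u.ceph_fl.fl_inode;

	if (!inode)	/* a request file_lock that was never copied */
		return;

	/* ... ceph_inode(inode) is now safe to use ... */
	iput(inode);
}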

Cc: stable@vger.kernel.org
URL: https://tracker.ceph.com/issues/57986
Signed-off-by: Xiubo Li <xiubli@redhat.com>
---
 fs/ceph/locks.c                 | 18 +++++++++++++++---
 include/linux/ceph/ceph_fs_fl.h | 26 ++++++++++++++++++++++++++
 include/linux/fs.h              |  2 ++
 3 files changed, 43 insertions(+), 3 deletions(-)
 create mode 100644 include/linux/ceph/ceph_fs_fl.h

diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 3e2843e86e27..d8385dd0076e 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -34,22 +34,34 @@ static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
 {
 	struct ceph_file_info *fi = dst->fl_file->private_data;
 	struct inode *inode = file_inode(dst->fl_file);
+
 	atomic_inc(&ceph_inode(inode)->i_filelock_ref);
 	atomic_inc(&fi->num_locks);
+	dst->fl_u.ceph_fl.fl_inode = igrab(inode);
 }
 
 static void ceph_fl_release_lock(struct file_lock *fl)
 {
 	struct ceph_file_info *fi = fl->fl_file->private_data;
-	struct inode *inode = file_inode(fl->fl_file);
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	atomic_dec(&fi->num_locks);
+	struct inode *inode = fl->fl_u.ceph_fl.fl_inode;
+	struct ceph_inode_info *ci;
+
+	/*
+	 * If inode is NULL it should be a request file_lock,
+	 * nothing we can do.
+	 */
+	if (!inode)
+		return;
+
+	ci = ceph_inode(inode);
 	if (atomic_dec_and_test(&ci->i_filelock_ref)) {
 		/* clear error when all locks are released */
 		spin_lock(&ci->i_ceph_lock);
 		ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK;
 		spin_unlock(&ci->i_ceph_lock);
 	}
+	iput(inode);
+	atomic_dec(&fi->num_locks);
 }
 
 static const struct file_lock_operations ceph_fl_lock_ops = {
diff --git a/include/linux/ceph/ceph_fs_fl.h b/include/linux/ceph/ceph_fs_fl.h
new file mode 100644
index 000000000000..02a412b26095
--- /dev/null
+++ b/include/linux/ceph/ceph_fs_fl.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ceph_fs_fl.h - Ceph file lock info shared with the VFS file_lock
+ * code.
+ *
+ * Defines the ceph-private per-lock state that is embedded in
+ * struct file_lock via the fl_u union, so that ceph can hold an
+ * inode reference for the lifetime of the lock.
+ *
+ * LGPL2
+ */
+
+#ifndef CEPH_FS_FL_H
+#define CEPH_FS_FL_H
+
+#include <linux/fs.h>
+
+/*
+ * Ceph lock info
+ */
+
+struct ceph_lock_info {
+	struct inode *fl_inode;
+};
+
+#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e654435f1651..db4810d19e26 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1066,6 +1066,7 @@ bool opens_in_grace(struct net *);
 
 /* that will die - we need it for nfs_lock_info */
 #include <linux/nfs_fs_i.h>
+#include <linux/ceph/ceph_fs_fl.h>
 
 /*
  * struct file_lock represents a generic "file lock". It's used to represent
@@ -1119,6 +1120,7 @@ struct file_lock {
 			int state;		/* state of grant or error if -ve */
 			unsigned int	debug_id;
 		} afs;
+		struct ceph_lock_info	ceph_fl;
 	} fl_u;
 } __randomize_layout;
 
-- 
2.31.1
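
For orientation on the include/linux/fs.h hunk above: fl_u is the
per-filesystem private area of struct file_lock, the same mechanism NFS
and AFS already use for their per-lock state. A rough sketch of the
union after this patch (the nfs/nfs4 member names are taken from the
existing fs.h and shown only for context):

struct file_lock {
	/* ... generic fields: fl_owner, fl_file, fl_ops, fl_lmops, ... */
	union {
		struct nfs_lock_info	nfs_fl;
		struct nfs4_lock_info	nfs4_fl;
		struct { /* ... */ }	afs;		/* AFS per-lock grant state */
		struct ceph_lock_info	ceph_fl;	/* new: pins the inode for ceph */
	} fl_u;
} __randomize_layout;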


Thread overview: 9+ messages
2022-11-14  5:18 [PATCH 0/2 v2] ceph: fix the use-after-free bug for file_lock xiubli
2022-11-14  5:19 ` xiubli [this message]
2022-11-14 11:24   ` [PATCH 1/2 v2] ceph: add ceph_lock_info support " Jeff Layton
2022-11-14 13:00     ` Xiubo Li
2022-11-14  5:19 ` [PATCH 2/2 v2] ceph: use a xarray to record all the opened files for each inode xiubli
2022-11-14  8:54   ` kernel test robot
2022-11-14 13:08     ` Xiubo Li
2022-11-14 13:39   ` Jeff Layton
2022-11-14 13:46     ` Xiubo Li
