From: Jeff Layton <jlayton@kernel.org>
To: ceph-devel@vger.kernel.org
Cc: zyan@redhat.com, sage@redhat.com, idryomov@gmail.com,
	pdonnell@redhat.com, xiubli@redhat.com
Subject: [RFC PATCH v2 09/10] ceph: cache layout in parent dir on first sync create
Date: Wed, 15 Jan 2020 15:59:11 -0500
Message-ID: <20200115205912.38688-10-jlayton@kernel.org>
In-Reply-To: <20200115205912.38688-1-jlayton@kernel.org>

It doesn't do much good to do an asynchronous create unless we can do
I/O to the new file before the create reply comes in. That means we need
layout info for it before we've gotten the response from the MDS.

All files created in a directory initially inherit the same layout, so
copy off the requisite info from the first synchronous create in the
directory and save it in a new i_cached_layout field. Zero out the
cached layout when we lose Dc (CEPH_CAP_DIR_CREATE) caps on the dir.
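
For illustration only (this sketch is not part of the patch, and the
ci_dir/ci_new names are hypothetical), a later async-create path could
seed a not-yet-acked child inode from the cached copy, roughly:

	/* assumes the parent's i_ceph_lock is held, as in cache_file_layout */
	if (ceph_file_layout_is_valid(&ci_dir->i_cached_layout)) {
		/* copy the parent's cached layout into the new inode */
		memcpy(&ci_new->i_layout, &ci_dir->i_cached_layout,
		       sizeof(ci_new->i_layout));
		/* take a reference on the pool namespace string, if any */
		rcu_assign_pointer(ci_new->i_layout.pool_ns,
			ceph_try_get_string(ci_dir->i_cached_layout.pool_ns));
	}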

Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 fs/ceph/caps.c  | 13 ++++++++++---
 fs/ceph/file.c  | 22 +++++++++++++++++++++-
 fs/ceph/inode.c |  2 ++
 fs/ceph/super.h |  1 +
 4 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 9d1a3d6831f7..70bf50f00c27 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -561,14 +561,14 @@ static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
 	spin_unlock(&mdsc->cap_delay_lock);
 }
 
-/*
- * Common issue checks for add_cap, handle_cap_grant.
- */
+/* Common issue checks for add_cap, handle_cap_grant. */
 static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
 			      unsigned issued)
 {
 	unsigned had = __ceph_caps_issued(ci, NULL);
 
+	lockdep_assert_held(&ci->i_ceph_lock);
+
 	/*
 	 * Each time we receive FILE_CACHE anew, we increment
 	 * i_rdcache_gen.
@@ -593,6 +593,13 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
 			__ceph_dir_clear_complete(ci);
 		}
 	}
+
+	/* Wipe saved layout if we're losing DIR_CREATE caps */
+	if (S_ISDIR(ci->vfs_inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) &&
+		!(issued & CEPH_CAP_DIR_CREATE)) {
+	     ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
+	     memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
+	}
 }
 
 /*
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 1e6cdf2dfe90..b44ccbc85fe4 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -430,6 +430,23 @@ int ceph_open(struct inode *inode, struct file *file)
 	return err;
 }
 
+/* Clone the layout from a synchronous create, if the dir now has Dc caps */
+static void
+cache_file_layout(struct inode *dst, struct inode *src)
+{
+	struct ceph_inode_info *cdst = ceph_inode(dst);
+	struct ceph_inode_info *csrc = ceph_inode(src);
+
+	spin_lock(&cdst->i_ceph_lock);
+	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
+	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
+		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
+			sizeof(cdst->i_cached_layout));
+		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
+				   ceph_try_get_string(csrc->i_layout.pool_ns));
+	}
+	spin_unlock(&cdst->i_ceph_lock);
+}
 
 /*
  * Do a lookup + open with a single request.  If we get a non-existent
@@ -518,7 +535,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 	} else {
 		dout("atomic_open finish_open on dn %p\n", dn);
 		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
-			ceph_init_inode_acls(d_inode(dentry), &as_ctx);
+			struct inode *newino = d_inode(dentry);
+
+			cache_file_layout(dir, newino);
+			ceph_init_inode_acls(newino, &as_ctx);
 			file->f_mode |= FMODE_CREATED;
 		}
 		err = finish_open(file, dentry, ceph_open);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 4056c7968b86..73f986efb1fd 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -447,6 +447,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 	ci->i_max_files = 0;
 
 	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
+	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
 	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
 
 	ci->i_fragtree = RB_ROOT;
@@ -587,6 +588,7 @@ void ceph_evict_inode(struct inode *inode)
 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
 
 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
+	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
 }
 
 static inline blkcnt_t calc_inode_blocks(u64 size)
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 86dd4a2163e0..09bd4b71de91 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -326,6 +326,7 @@ struct ceph_inode_info {
 
 	struct ceph_dir_layout i_dir_layout;
 	struct ceph_file_layout i_layout;
+	struct ceph_file_layout i_cached_layout;	// for async creates
 	char *i_symlink;
 
 	/* for dirs */
-- 
2.24.1
