All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/3] cifs: cache directory content for shroot
@ 2020-10-01 20:50 Ronnie Sahlberg
  2020-10-01 20:50 ` [PATCH 1/3] cifs: return cached_fid from open_shroot Ronnie Sahlberg
                   ` (3 more replies)
  0 siblings, 4 replies; 15+ messages in thread
From: Ronnie Sahlberg @ 2020-10-01 20:50 UTC (permalink / raw)
  To: linux-cifs; +Cc: Steve French

Steve, List

See initial implementation of a mechanism to cache the directory entries for
a shared cache handle (shroot).
We cache all the entries during the initial readdir() scan, using the context
from the vfs layer as the key to handle if there are multiple concurrent readdir() scans
of the same directory.
Then if/when we have successfully cached the entire directory we will serve any
subsequent readdir() from out of cache, avoiding making any query directory calls to the server.

As with all of shroot, the cache is kept until the directory lease is broken.


The first two patches are small and just a preparation for the third patch. They go as separate
patches to make review easier.
The third patch adds the actual meat of the dirent caching.


For now this might not be too exciting because we only cache the root handle.
I hope in the future we will expand the directory caching to handle any/many directories.


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH 1/3] cifs: return cached_fid from open_shroot
  2020-10-01 20:50 [PATCH 0/3] cifs: cache directory content for shroot Ronnie Sahlberg
@ 2020-10-01 20:50 ` Ronnie Sahlberg
  2020-10-01 20:50 ` [PATCH 2/3] cifs: compute full_path already in cifs_readdir() Ronnie Sahlberg
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 15+ messages in thread
From: Ronnie Sahlberg @ 2020-10-01 20:50 UTC (permalink / raw)
  To: linux-cifs; +Cc: Steve French

Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
---
 fs/cifs/smb2inode.c | 11 ++++++-----
 fs/cifs/smb2ops.c   | 22 +++++++++++++++-------
 fs/cifs/smb2proto.h |  3 ++-
 3 files changed, 23 insertions(+), 13 deletions(-)

diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index eba01d0908dd..df6212e55e10 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -511,9 +511,9 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
 	int rc;
 	struct smb2_file_all_info *smb2_data;
 	__u32 create_options = 0;
-	struct cifs_fid fid;
 	bool no_cached_open = tcon->nohandlecache;
 	struct cifsFileInfo *cfile;
+	struct cached_fid *cfid = NULL;
 
 	*adjust_tz = false;
 	*symlink = false;
@@ -525,7 +525,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
 
 	/* If it is a root and its handle is cached then use it */
 	if (!strlen(full_path) && !no_cached_open) {
-		rc = open_shroot(xid, tcon, cifs_sb, &fid);
+		rc = open_shroot(xid, tcon, cifs_sb, &cfid);
 		if (rc)
 			goto out;
 
@@ -533,12 +533,13 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
 			move_smb2_info_to_cifs(data,
 					       &tcon->crfid.file_all_info);
 		} else {
-			rc = SMB2_query_info(xid, tcon, fid.persistent_fid,
-					     fid.volatile_fid, smb2_data);
+			rc = SMB2_query_info(xid, tcon,
+					     cfid->fid->persistent_fid,
+					     cfid->fid->volatile_fid, smb2_data);
 			if (!rc)
 				move_smb2_info_to_cifs(data, smb2_data);
 		}
-		close_shroot(&tcon->crfid);
+		close_shroot(cfid);
 		goto out;
 	}
 
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 32f90dc82c84..fd6c136066df 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -651,7 +651,8 @@ smb2_cached_lease_break(struct work_struct *work)
  * Open the directory at the root of a share
  */
 int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
-		struct cifs_sb_info *cifs_sb, struct cifs_fid *pfid)
+		struct cifs_sb_info *cifs_sb,
+		struct cached_fid **cfid)
 {
 	struct cifs_ses *ses = tcon->ses;
 	struct TCP_Server_Info *server = ses->server;
@@ -666,11 +667,12 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
 	int rc, flags = 0;
 	__le16 utf16_path = 0; /* Null - since an open of top of share */
 	u8 oplock = SMB2_OPLOCK_LEVEL_II;
+	struct cifs_fid *pfid;
 
 	mutex_lock(&tcon->crfid.fid_mutex);
 	if (tcon->crfid.is_valid) {
 		cifs_dbg(FYI, "found a cached root file handle\n");
-		memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
+		*cfid = &tcon->crfid;
 		kref_get(&tcon->crfid.refcount);
 		mutex_unlock(&tcon->crfid.fid_mutex);
 		return 0;
@@ -691,6 +693,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
 	if (!server->ops->new_lease_key)
 		return -EIO;
 
+	pfid = tcon->crfid.fid;
 	server->ops->new_lease_key(pfid);
 
 	memset(rqst, 0, sizeof(rqst));
@@ -820,6 +823,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
 	SMB2_query_info_free(&rqst[1]);
 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+	if (rc == 0)
+		*cfid = &tcon->crfid;
 	return rc;
 }
 
@@ -833,6 +838,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
 	struct cifs_open_parms oparms;
 	struct cifs_fid fid;
 	bool no_cached_open = tcon->nohandlecache;
+	struct cached_fid *cfid = NULL;
 
 	oparms.tcon = tcon;
 	oparms.desired_access = FILE_READ_ATTRIBUTES;
@@ -841,12 +847,14 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
 	oparms.fid = &fid;
 	oparms.reconnect = false;
 
-	if (no_cached_open)
+	if (no_cached_open) {
 		rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
 			       NULL, NULL);
-	else
-		rc = open_shroot(xid, tcon, cifs_sb, &fid);
-
+	} else {
+		rc = open_shroot(xid, tcon, cifs_sb, &cfid);
+		if (rc == 0)
+			memcpy(&fid, cfid->fid, sizeof(struct cifs_fid));
+	}
 	if (rc)
 		return;
 
@@ -863,7 +871,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
 	if (no_cached_open)
 		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
 	else
-		close_shroot(&tcon->crfid);
+		close_shroot(cfid);
 }
 
 static void
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 2f8ecbf54214..67c50d78caa1 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -70,7 +70,8 @@ extern int smb3_handle_read_data(struct TCP_Server_Info *server,
 				 struct mid_q_entry *mid);
 
 extern int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
-		       struct cifs_sb_info *cifs_sb, struct cifs_fid *pfid);
+		       struct cifs_sb_info *cifs_sb,
+		       struct cached_fid **cfid);
 extern void close_shroot(struct cached_fid *cfid);
 extern void close_shroot_lease(struct cached_fid *cfid);
 extern void close_shroot_lease_locked(struct cached_fid *cfid);
-- 
2.13.6


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH 2/3] cifs: compute full_path already in cifs_readdir()
  2020-10-01 20:50 [PATCH 0/3] cifs: cache directory content for shroot Ronnie Sahlberg
  2020-10-01 20:50 ` [PATCH 1/3] cifs: return cached_fid from open_shroot Ronnie Sahlberg
@ 2020-10-01 20:50 ` Ronnie Sahlberg
  2020-10-01 20:50 ` [PATCH 3/3] cifs: cache the directory content for shroot Ronnie Sahlberg
  2020-10-02  5:07 ` [PATCH 0/3] cifs: cache " Steve French
  3 siblings, 0 replies; 15+ messages in thread
From: Ronnie Sahlberg @ 2020-10-01 20:50 UTC (permalink / raw)
  To: linux-cifs; +Cc: Steve French

Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
---
 fs/cifs/readdir.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 6df0922e7e30..31a18aae5e64 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -360,11 +360,11 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
  */
 
 static int
-initiate_cifs_search(const unsigned int xid, struct file *file)
+initiate_cifs_search(const unsigned int xid, struct file *file,
+		     char *full_path)
 {
 	__u16 search_flags;
 	int rc = 0;
-	char *full_path = NULL;
 	struct cifsFileInfo *cifsFile;
 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
 	struct tcon_link *tlink = NULL;
@@ -400,12 +400,6 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
 	cifsFile->invalidHandle = true;
 	cifsFile->srch_inf.endOfSearch = false;
 
-	full_path = build_path_from_dentry(file_dentry(file));
-	if (full_path == NULL) {
-		rc = -ENOMEM;
-		goto error_exit;
-	}
-
 	cifs_dbg(FYI, "Full path: %s start at: %lld\n", full_path, file->f_pos);
 
 ffirst_retry:
@@ -444,7 +438,6 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
 		goto ffirst_retry;
 	}
 error_exit:
-	kfree(full_path);
 	cifs_put_tlink(tlink);
 	return rc;
 }
@@ -688,7 +681,8 @@ static int cifs_save_resume_key(const char *current_entry,
  */
 static int
 find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
-		struct file *file, char **current_entry, int *num_to_ret)
+		struct file *file, char *full_path,
+		char **current_entry, int *num_to_ret)
 {
 	__u16 search_flags;
 	int rc = 0;
@@ -741,7 +735,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
 						ntwrk_buf_start);
 			cfile->srch_inf.ntwrk_buf_start = NULL;
 		}
-		rc = initiate_cifs_search(xid, file);
+		rc = initiate_cifs_search(xid, file, full_path);
 		if (rc) {
 			cifs_dbg(FYI, "error %d reinitiating a search on rewind\n",
 				 rc);
@@ -925,15 +919,22 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 	char *tmp_buf = NULL;
 	char *end_of_smb;
 	unsigned int max_len;
+	char *full_path = NULL;
 
 	xid = get_xid();
 
+	full_path = build_path_from_dentry(file_dentry(file));
+	if (full_path == NULL) {
+		rc = -ENOMEM;
+		goto rddir2_exit;
+	}
+
 	/*
 	 * Ensure FindFirst doesn't fail before doing filldir() for '.' and
 	 * '..'. Otherwise we won't be able to notify VFS in case of failure.
 	 */
 	if (file->private_data == NULL) {
-		rc = initiate_cifs_search(xid, file);
+		rc = initiate_cifs_search(xid, file, full_path);
 		cifs_dbg(FYI, "initiate cifs search rc %d\n", rc);
 		if (rc)
 			goto rddir2_exit;
@@ -960,8 +961,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 	} */
 
 	tcon = tlink_tcon(cifsFile->tlink);
-	rc = find_cifs_entry(xid, tcon, ctx->pos, file, &current_entry,
-			     &num_to_fill);
+	rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
+			     &current_entry, &num_to_fill);
 	if (rc) {
 		cifs_dbg(FYI, "fce error %d\n", rc);
 		goto rddir2_exit;
@@ -1019,6 +1020,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 	kfree(tmp_buf);
 
 rddir2_exit:
+	kfree(full_path);
 	free_xid(xid);
 	return rc;
 }
-- 
2.13.6


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH 3/3] cifs: cache the directory content for shroot
  2020-10-01 20:50 [PATCH 0/3] cifs: cache directory content for shroot Ronnie Sahlberg
  2020-10-01 20:50 ` [PATCH 1/3] cifs: return cached_fid from open_shroot Ronnie Sahlberg
  2020-10-01 20:50 ` [PATCH 2/3] cifs: compute full_path already in cifs_readdir() Ronnie Sahlberg
@ 2020-10-01 20:50 ` Ronnie Sahlberg
  2020-10-01 22:24   ` Steve French
                     ` (2 more replies)
  2020-10-02  5:07 ` [PATCH 0/3] cifs: cache " Steve French
  3 siblings, 3 replies; 15+ messages in thread
From: Ronnie Sahlberg @ 2020-10-01 20:50 UTC (permalink / raw)
  To: linux-cifs; +Cc: Steve French

Add caching of the directory content for the shroot.
Populate the cache during the initial scan in readdir()
and then try to serve out of the cache for subsequent scans.

Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
---
 fs/cifs/cifsglob.h |  21 +++++++
 fs/cifs/misc.c     |   2 +
 fs/cifs/readdir.c  | 173 ++++++++++++++++++++++++++++++++++++++++++++++++++---
 fs/cifs/smb2ops.c  |  14 +++++
 4 files changed, 203 insertions(+), 7 deletions(-)

diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index b565d83ba89e..e8f2b4a642d4 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1073,6 +1073,26 @@ cap_unix(struct cifs_ses *ses)
 	return ses->server->vals->cap_unix & ses->capabilities;
 }
 
+struct cached_dirent {
+	struct list_head entry;
+	char *name;
+	int namelen;
+	loff_t pos;
+	u64 ino;
+	unsigned type;
+};
+
+struct cached_dirents {
+	bool is_valid:1;
+	bool is_failed:1;
+	struct dir_context *ctx; /* Only used to make sure we only take entries
+				  * from a single context. Never dereferenced.
+				  */
+	struct mutex de_mutex;
+	int pos;		 /* Expected ctx->pos */
+	struct list_head entries;
+};
+
 struct cached_fid {
 	bool is_valid:1;	/* Do we have a useable root fid */
 	bool file_all_info_is_valid:1;
@@ -1083,6 +1103,7 @@ struct cached_fid {
 	struct cifs_tcon *tcon;
 	struct work_struct lease_break;
 	struct smb2_file_all_info file_all_info;
+	struct cached_dirents dirents;
 };
 
 /*
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 1c14cf01dbef..a106bd3cc20b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -124,6 +124,8 @@ tconInfoAlloc(void)
 		kfree(ret_buf);
 		return NULL;
 	}
+	INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries);
+	mutex_init(&ret_buf->crfid.dirents.de_mutex);
 
 	atomic_inc(&tconInfoAllocCount);
 	ret_buf->tidStatus = CifsNew;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 31a18aae5e64..17861c3d2e08 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -811,9 +811,119 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
 	return rc;
 }
 
+static void init_cached_dirents(struct cached_dirents *cde,
+				struct dir_context *ctx)
+{
+	if (ctx->pos == 2 && cde->ctx == NULL) {
+		cde->ctx = ctx;
+		cde->pos = 2;
+	}
+}
+
+static bool emit_cached_dirents(struct cached_dirents *cde,
+				struct dir_context *ctx)
+{
+	struct list_head *tmp;
+	struct cached_dirent *dirent;
+	int rc;
+
+	list_for_each(tmp, &cde->entries) {
+		dirent = list_entry(tmp, struct cached_dirent, entry);
+		if (ctx->pos >= dirent->pos)
+			continue;
+		ctx->pos = dirent->pos;
+		rc = dir_emit(ctx, dirent->name, dirent->namelen,
+			      dirent->ino, dirent->type);
+		if (!rc)
+			return rc;
+	}
+	return true;
+}
+
+static void update_cached_dirents_count(struct cached_dirents *cde,
+					struct dir_context *ctx)
+{
+	if (cde->ctx != ctx)
+		return;
+	if (cde->is_valid || cde->is_failed)
+		return;
+
+	cde->pos++;
+}
+
+static void finished_cached_dirents_count(struct cached_dirents *cde,
+					struct dir_context *ctx)
+{
+	if (cde->ctx != ctx)
+		return;
+	if (cde->is_valid || cde->is_failed)
+		return;
+	if (ctx->pos != cde->pos)
+		return;
+
+	cde->is_valid = 1;
+}
+
+static void add_cached_dirent(struct cached_dirents *cde,
+			      struct dir_context *ctx,
+			      const char *name, int namelen,
+			      u64 ino, unsigned type)
+{
+	struct cached_dirent *de;
+
+	if (cde->ctx != ctx)
+		return;
+	if (cde->is_valid || cde->is_failed)
+		return;
+	if (ctx->pos != cde->pos) {
+		cde->is_failed = 1;
+		return;
+	}
+	de = kzalloc(sizeof(*de), GFP_ATOMIC);
+	if (de == NULL) {
+		cde->is_failed = 1;
+		return;
+	}
+	de->name = kzalloc(namelen, GFP_ATOMIC);
+	if (de->name == NULL) {
+		kfree(de);
+		cde->is_failed = 1;
+		return;
+	}
+	memcpy(de->name, name, namelen);
+	de->namelen = namelen;
+	de->pos = ctx->pos;
+	de->ino = ino;
+	de->type = type;
+
+	list_add_tail(&de->entry, &cde->entries);
+}
+
+static bool cifs_dir_emit(struct dir_context *ctx,
+			  const char *name, int namelen,
+			  u64 ino, unsigned type,
+			  struct cached_fid *cfid)
+{
+	bool rc;
+
+	rc = dir_emit(ctx, name, namelen, ino, type);
+	if (!rc)
+		return rc;
+
+	if (cfid) {
+		mutex_lock(&cfid->dirents.de_mutex);
+		add_cached_dirent(&cfid->dirents, ctx, name, namelen, ino,
+				  type);
+		mutex_unlock(&cfid->dirents.de_mutex);
+	}
+
+	return rc;
+}
+
 static int cifs_filldir(char *find_entry, struct file *file,
-		struct dir_context *ctx,
-		char *scratch_buf, unsigned int max_len)
+			struct dir_context *ctx,
+			char *scratch_buf, unsigned int max_len,
+			struct cached_fid *cfid)
 {
 	struct cifsFileInfo *file_info = file->private_data;
 	struct super_block *sb = file_inode(file)->i_sb;
@@ -903,10 +1013,10 @@ static int cifs_filldir(char *find_entry, struct file *file,
 	cifs_prime_dcache(file_dentry(file), &name, &fattr);
 
 	ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
-	return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype);
+	return !cifs_dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype,
+			      cfid);
 }
 
-
 int cifs_readdir(struct file *file, struct dir_context *ctx)
 {
 	int rc = 0;
@@ -920,6 +1030,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 	char *end_of_smb;
 	unsigned int max_len;
 	char *full_path = NULL;
+	struct cached_fid *cfid = NULL;
+	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
 
 	xid = get_xid();
 
@@ -928,7 +1040,6 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 		rc = -ENOMEM;
 		goto rddir2_exit;
 	}
-
 	/*
 	 * Ensure FindFirst doesn't fail before doing filldir() for '.' and
 	 * '..'. Otherwise we won't be able to notify VFS in case of failure.
@@ -949,6 +1060,31 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 		if after then keep searching till find it */
 
 	cifsFile = file->private_data;
+	tcon = tlink_tcon(cifsFile->tlink);
+	if (tcon->ses->server->vals->header_preamble_size == 0 &&
+		!strcmp(full_path, "")) {
+		rc = open_shroot(xid, tcon, cifs_sb, &cfid);
+		if (rc)
+			goto cache_not_found;
+
+		mutex_lock(&cfid->dirents.de_mutex);
+		/* if this was reading from the start of the directory
+		 * we might need to initialize scanning and storing the
+		 * directory content.
+		 */
+		init_cached_dirents(&cfid->dirents, ctx);
+		/* If we already have the entire directory cached then
+		 * we cna just serve the cache.
+		 */
+		if (cfid->dirents.is_valid) {
+			emit_cached_dirents(&cfid->dirents, ctx);
+			mutex_unlock(&cfid->dirents.de_mutex);
+			goto rddir2_exit;
+		}
+		mutex_unlock(&cfid->dirents.de_mutex);
+	}
+ cache_not_found:
+
 	if (cifsFile->srch_inf.endOfSearch) {
 		if (cifsFile->srch_inf.emptyDir) {
 			cifs_dbg(FYI, "End of search, empty dir\n");
@@ -960,15 +1096,30 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 		tcon->ses->server->close(xid, tcon, &cifsFile->fid);
 	} */
 
-	tcon = tlink_tcon(cifsFile->tlink);
+	/* Drop the cache while calling find_cifs_entry in case there will
+	 * be reconnects during query_directory.
+	 */
+	if (cfid) {
+		close_shroot(cfid);
+		cfid = NULL;
+	}
 	rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
 			     &current_entry, &num_to_fill);
+	if (tcon->ses->server->vals->header_preamble_size == 0 &&
+		!strcmp(full_path, "")) {
+		open_shroot(xid, tcon, cifs_sb, &cfid);
+	}
 	if (rc) {
 		cifs_dbg(FYI, "fce error %d\n", rc);
 		goto rddir2_exit;
 	} else if (current_entry != NULL) {
 		cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
 	} else {
+		if (cfid) {
+			mutex_lock(&cfid->dirents.de_mutex);
+			finished_cached_dirents_count(&cfid->dirents, ctx);
+			mutex_unlock(&cfid->dirents.de_mutex);
+		}
 		cifs_dbg(FYI, "Could not find entry\n");
 		goto rddir2_exit;
 	}
@@ -998,7 +1149,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 		 */
 		*tmp_buf = 0;
 		rc = cifs_filldir(current_entry, file, ctx,
-				  tmp_buf, max_len);
+				  tmp_buf, max_len, cfid);
 		if (rc) {
 			if (rc > 0)
 				rc = 0;
@@ -1006,6 +1157,12 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 		}
 
 		ctx->pos++;
+		if (cfid) {
+			mutex_lock(&cfid->dirents.de_mutex);
+			update_cached_dirents_count(&cfid->dirents, ctx);
+			mutex_unlock(&cfid->dirents.de_mutex);
+		}
+
 		if (ctx->pos ==
 			cifsFile->srch_inf.index_of_last_entry) {
 			cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
@@ -1020,6 +1177,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 	kfree(tmp_buf);
 
 rddir2_exit:
+	if (cfid)
+		close_shroot(cfid);
 	kfree(full_path);
 	free_xid(xid);
 	return rc;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index fd6c136066df..280464e21a5f 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -605,6 +605,8 @@ smb2_close_cached_fid(struct kref *ref)
 {
 	struct cached_fid *cfid = container_of(ref, struct cached_fid,
 					       refcount);
+	struct list_head *pos, *q;
+	struct cached_dirent *dirent;
 
 	if (cfid->is_valid) {
 		cifs_dbg(FYI, "clear cached root file handle\n");
@@ -613,6 +615,18 @@ smb2_close_cached_fid(struct kref *ref)
 		cfid->is_valid = false;
 		cfid->file_all_info_is_valid = false;
 		cfid->has_lease = false;
+		mutex_lock(&cfid->dirents.de_mutex);
+		list_for_each_safe(pos, q, &cfid->dirents.entries) {
+			dirent = list_entry(pos, struct cached_dirent, entry);
+			list_del(pos);
+			kfree(dirent->name);
+			kfree(dirent);
+		}
+		cfid->dirents.is_valid = 0;
+		cfid->dirents.is_failed = 0;
+		cfid->dirents.ctx = NULL;
+		cfid->dirents.pos = 0;
+		mutex_unlock(&cfid->dirents.de_mutex);
 	}
 }
 
-- 
2.13.6


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/3] cifs: cache the directory content for shroot
  2020-10-01 20:50 ` [PATCH 3/3] cifs: cache the directory content for shroot Ronnie Sahlberg
@ 2020-10-01 22:24   ` Steve French
  2020-10-02 15:29   ` Aurélien Aptel
  2020-10-20 10:28   ` Shyam Prasad N
  2 siblings, 0 replies; 15+ messages in thread
From: Steve French @ 2020-10-01 22:24 UTC (permalink / raw)
  To: Ronnie Sahlberg; +Cc: linux-cifs

minor nit pointed out by checkpatch script

0003-cifs-cache-the-directory-content-for-shroot.patch
------------------------------------------------------
WARNING: Prefer 'unsigned int' to bare use of 'unsigned'
#33: FILE: fs/cifs/cifsglob.h:1082:
+ unsigned type;

WARNING: Prefer 'unsigned int' to bare use of 'unsigned'
#135: FILE: fs/cifs/readdir.c:870:
+       u64 ino, unsigned type)

WARNING: Prefer 'unsigned int' to bare use of 'unsigned'
#169: FILE: fs/cifs/readdir.c:904:
+   u64 ino, unsigned type,

total: 0 errors, 3 warnings, 305 lines checked

On Thu, Oct 1, 2020 at 3:50 PM Ronnie Sahlberg <lsahlber@redhat.com> wrote:
>
> Add caching of the directory content for the shroot.
> Populate the cache during the initial scan in readdir()
> and then try to serve out of the cache for subsequent scans.
>
> Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
> ---
>  fs/cifs/cifsglob.h |  21 +++++++
>  fs/cifs/misc.c     |   2 +
>  fs/cifs/readdir.c  | 173 ++++++++++++++++++++++++++++++++++++++++++++++++++---
>  fs/cifs/smb2ops.c  |  14 +++++
>  4 files changed, 203 insertions(+), 7 deletions(-)
>
> diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
> index b565d83ba89e..e8f2b4a642d4 100644
> --- a/fs/cifs/cifsglob.h
> +++ b/fs/cifs/cifsglob.h
> @@ -1073,6 +1073,26 @@ cap_unix(struct cifs_ses *ses)
>         return ses->server->vals->cap_unix & ses->capabilities;
>  }
>
> +struct cached_dirent {
> +       struct list_head entry;
> +       char *name;
> +       int namelen;
> +       loff_t pos;
> +       u64 ino;
> +       unsigned type;
> +};
> +
> +struct cached_dirents {
> +       bool is_valid:1;
> +       bool is_failed:1;
> +       struct dir_context *ctx; /* Only used to make sure we only take entries
> +                                 * from a single context. Never dereferenced.
> +                                 */
> +       struct mutex de_mutex;
> +       int pos;                 /* Expected ctx->pos */
> +       struct list_head entries;
> +};
> +
>  struct cached_fid {
>         bool is_valid:1;        /* Do we have a useable root fid */
>         bool file_all_info_is_valid:1;
> @@ -1083,6 +1103,7 @@ struct cached_fid {
>         struct cifs_tcon *tcon;
>         struct work_struct lease_break;
>         struct smb2_file_all_info file_all_info;
> +       struct cached_dirents dirents;
>  };
>
>  /*
> diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
> index 1c14cf01dbef..a106bd3cc20b 100644
> --- a/fs/cifs/misc.c
> +++ b/fs/cifs/misc.c
> @@ -124,6 +124,8 @@ tconInfoAlloc(void)
>                 kfree(ret_buf);
>                 return NULL;
>         }
> +       INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries);
> +       mutex_init(&ret_buf->crfid.dirents.de_mutex);
>
>         atomic_inc(&tconInfoAllocCount);
>         ret_buf->tidStatus = CifsNew;
> diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
> index 31a18aae5e64..17861c3d2e08 100644
> --- a/fs/cifs/readdir.c
> +++ b/fs/cifs/readdir.c
> @@ -811,9 +811,119 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
>         return rc;
>  }
>
> +static void init_cached_dirents(struct cached_dirents *cde,
> +                               struct dir_context *ctx)
> +{
> +       if (ctx->pos == 2 && cde->ctx == NULL) {
> +               cde->ctx = ctx;
> +               cde->pos = 2;
> +       }
> +}
> +
> +static bool emit_cached_dirents(struct cached_dirents *cde,
> +                               struct dir_context *ctx)
> +{
> +       struct list_head *tmp;
> +       struct cached_dirent *dirent;
> +       int rc;
> +
> +       list_for_each(tmp, &cde->entries) {
> +               dirent = list_entry(tmp, struct cached_dirent, entry);
> +               if (ctx->pos >= dirent->pos)
> +                       continue;
> +               ctx->pos = dirent->pos;
> +               rc = dir_emit(ctx, dirent->name, dirent->namelen,
> +                             dirent->ino, dirent->type);
> +               if (!rc)
> +                       return rc;
> +       }
> +       return true;
> +}
> +
> +static void update_cached_dirents_count(struct cached_dirents *cde,
> +                                       struct dir_context *ctx)
> +{
> +       if (cde->ctx != ctx)
> +               return;
> +       if (cde->is_valid || cde->is_failed)
> +               return;
> +
> +       cde->pos++;
> +}
> +
> +static void finished_cached_dirents_count(struct cached_dirents *cde,
> +                                       struct dir_context *ctx)
> +{
> +       if (cde->ctx != ctx)
> +               return;
> +       if (cde->is_valid || cde->is_failed)
> +               return;
> +       if (ctx->pos != cde->pos)
> +               return;
> +
> +       cde->is_valid = 1;
> +}
> +
> +static void add_cached_dirent(struct cached_dirents *cde,
> +                             struct dir_context *ctx,
> +                             const char *name, int namelen,
> +                             u64 ino, unsigned type)
> +{
> +       struct cached_dirent *de;
> +
> +       if (cde->ctx != ctx)
> +               return;
> +       if (cde->is_valid || cde->is_failed)
> +               return;
> +       if (ctx->pos != cde->pos) {
> +               cde->is_failed = 1;
> +               return;
> +       }
> +       de = kzalloc(sizeof(*de), GFP_ATOMIC);
> +       if (de == NULL) {
> +               cde->is_failed = 1;
> +               return;
> +       }
> +       de->name = kzalloc(namelen, GFP_ATOMIC);
> +       if (de->name == NULL) {
> +               kfree(de);
> +               cde->is_failed = 1;
> +               return;
> +       }
> +       memcpy(de->name, name, namelen);
> +       de->namelen = namelen;
> +       de->pos = ctx->pos;
> +       de->ino = ino;
> +       de->type = type;
> +
> +       list_add_tail(&de->entry, &cde->entries);
> +}
> +
> +static bool cifs_dir_emit(struct dir_context *ctx,
> +                         const char *name, int namelen,
> +                         u64 ino, unsigned type,
> +                         struct cached_fid *cfid)
> +{
> +       bool rc;
> +
> +       rc = dir_emit(ctx, name, namelen, ino, type);
> +       if (!rc)
> +               return rc;
> +
> +       if (cfid) {
> +               mutex_lock(&cfid->dirents.de_mutex);
> +               add_cached_dirent(&cfid->dirents, ctx, name, namelen, ino,
> +                                 type);
> +               mutex_unlock(&cfid->dirents.de_mutex);
> +       }
> +
> +       return rc;
> +}
> +
>  static int cifs_filldir(char *find_entry, struct file *file,
> -               struct dir_context *ctx,
> -               char *scratch_buf, unsigned int max_len)
> +                       struct dir_context *ctx,
> +                       char *scratch_buf, unsigned int max_len,
> +                       struct cached_fid *cfid)
>  {
>         struct cifsFileInfo *file_info = file->private_data;
>         struct super_block *sb = file_inode(file)->i_sb;
> @@ -903,10 +1013,10 @@ static int cifs_filldir(char *find_entry, struct file *file,
>         cifs_prime_dcache(file_dentry(file), &name, &fattr);
>
>         ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
> -       return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype);
> +       return !cifs_dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype,
> +                             cfid);
>  }
>
> -
>  int cifs_readdir(struct file *file, struct dir_context *ctx)
>  {
>         int rc = 0;
> @@ -920,6 +1030,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>         char *end_of_smb;
>         unsigned int max_len;
>         char *full_path = NULL;
> +       struct cached_fid *cfid = NULL;
> +       struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
>
>         xid = get_xid();
>
> @@ -928,7 +1040,6 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>                 rc = -ENOMEM;
>                 goto rddir2_exit;
>         }
> -
>         /*
>          * Ensure FindFirst doesn't fail before doing filldir() for '.' and
>          * '..'. Otherwise we won't be able to notify VFS in case of failure.
> @@ -949,6 +1060,31 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>                 if after then keep searching till find it */
>
>         cifsFile = file->private_data;
> +       tcon = tlink_tcon(cifsFile->tlink);
> +       if (tcon->ses->server->vals->header_preamble_size == 0 &&
> +               !strcmp(full_path, "")) {
> +               rc = open_shroot(xid, tcon, cifs_sb, &cfid);
> +               if (rc)
> +                       goto cache_not_found;
> +
> +               mutex_lock(&cfid->dirents.de_mutex);
> +               /* if this was reading from the start of the directory
> +                * we might need to initialize scanning and storing the
> +                * directory content.
> +                */
> +               init_cached_dirents(&cfid->dirents, ctx);
> +               /* If we already have the entire directory cached then
> +                * we cna just serve the cache.
> +                */
> +               if (cfid->dirents.is_valid) {
> +                       emit_cached_dirents(&cfid->dirents, ctx);
> +                       mutex_unlock(&cfid->dirents.de_mutex);
> +                       goto rddir2_exit;
> +               }
> +               mutex_unlock(&cfid->dirents.de_mutex);
> +       }
> + cache_not_found:
> +
>         if (cifsFile->srch_inf.endOfSearch) {
>                 if (cifsFile->srch_inf.emptyDir) {
>                         cifs_dbg(FYI, "End of search, empty dir\n");
> @@ -960,15 +1096,30 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>                 tcon->ses->server->close(xid, tcon, &cifsFile->fid);
>         } */
>
> -       tcon = tlink_tcon(cifsFile->tlink);
> +       /* Drop the cache while calling find_cifs_entry in case there will
> +        * be reconnects during query_directory.
> +        */
> +       if (cfid) {
> +               close_shroot(cfid);
> +               cfid = NULL;
> +       }
>         rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
>                              &current_entry, &num_to_fill);
> +       if (tcon->ses->server->vals->header_preamble_size == 0 &&
> +               !strcmp(full_path, "")) {
> +               open_shroot(xid, tcon, cifs_sb, &cfid);
> +       }
>         if (rc) {
>                 cifs_dbg(FYI, "fce error %d\n", rc);
>                 goto rddir2_exit;
>         } else if (current_entry != NULL) {
>                 cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
>         } else {
> +               if (cfid) {
> +                       mutex_lock(&cfid->dirents.de_mutex);
> +                       finished_cached_dirents_count(&cfid->dirents, ctx);
> +                       mutex_unlock(&cfid->dirents.de_mutex);
> +               }
>                 cifs_dbg(FYI, "Could not find entry\n");
>                 goto rddir2_exit;
>         }
> @@ -998,7 +1149,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>                  */
>                 *tmp_buf = 0;
>                 rc = cifs_filldir(current_entry, file, ctx,
> -                                 tmp_buf, max_len);
> +                                 tmp_buf, max_len, cfid);
>                 if (rc) {
>                         if (rc > 0)
>                                 rc = 0;
> @@ -1006,6 +1157,12 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>                 }
>
>                 ctx->pos++;
> +               if (cfid) {
> +                       mutex_lock(&cfid->dirents.de_mutex);
> +                       update_cached_dirents_count(&cfid->dirents, ctx);
> +                       mutex_unlock(&cfid->dirents.de_mutex);
> +               }
> +
>                 if (ctx->pos ==
>                         cifsFile->srch_inf.index_of_last_entry) {
>                         cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
> @@ -1020,6 +1177,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>         kfree(tmp_buf);
>
>  rddir2_exit:
> +       if (cfid)
> +               close_shroot(cfid);
>         kfree(full_path);
>         free_xid(xid);
>         return rc;
> diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
> index fd6c136066df..280464e21a5f 100644
> --- a/fs/cifs/smb2ops.c
> +++ b/fs/cifs/smb2ops.c
> @@ -605,6 +605,8 @@ smb2_close_cached_fid(struct kref *ref)
>  {
>         struct cached_fid *cfid = container_of(ref, struct cached_fid,
>                                                refcount);
> +       struct list_head *pos, *q;
> +       struct cached_dirent *dirent;
>
>         if (cfid->is_valid) {
>                 cifs_dbg(FYI, "clear cached root file handle\n");
> @@ -613,6 +615,18 @@ smb2_close_cached_fid(struct kref *ref)
>                 cfid->is_valid = false;
>                 cfid->file_all_info_is_valid = false;
>                 cfid->has_lease = false;
> +               mutex_lock(&cfid->dirents.de_mutex);
> +               list_for_each_safe(pos, q, &cfid->dirents.entries) {
> +                       dirent = list_entry(pos, struct cached_dirent, entry);
> +                       list_del(pos);
> +                       kfree(dirent->name);
> +                       kfree(dirent);
> +               }
> +               cfid->dirents.is_valid = 0;
> +               cfid->dirents.is_failed = 0;
> +               cfid->dirents.ctx = NULL;
> +               cfid->dirents.pos = 0;
> +               mutex_unlock(&cfid->dirents.de_mutex);
>         }
>  }
>
> --
> 2.13.6
>


-- 
Thanks,

Steve

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 0/3] cifs: cache directory content for shroot
  2020-10-01 20:50 [PATCH 0/3] cifs: cache directory content for shroot Ronnie Sahlberg
                   ` (2 preceding siblings ...)
  2020-10-01 20:50 ` [PATCH 3/3] cifs: cache the directory content for shroot Ronnie Sahlberg
@ 2020-10-02  5:07 ` Steve French
  2020-10-02  8:04   ` ronnie sahlberg
  3 siblings, 1 reply; 15+ messages in thread
From: Steve French @ 2020-10-02  5:07 UTC (permalink / raw)
  To: Ronnie Sahlberg; +Cc: linux-cifs

My initial test of this was to Windows 10, doing ls of the root directory.

During mount as expected I see the initial compounded smb3.1.1
create/query(file_all_info) work and get a directory lease (read
lease) on the root directory

Then doing the ls
I see the expected ls of the root directory query dir return
successfully, but it is a compounded open/query-dir (no lease
requested) not a query dir using the already open root file handle.
We should find a way to allow it to use the root file handle if
possible although perhaps less important when the caching code is
fully working as it will only be requested once.

The next ls though does an open/query but then does stat of all the
files (which is bad, lots of compounded open/query-info/close).  Then
the next ls will do open/query-dir

So the patch set is promising but currently isn't caching the root
directory contents in a way that is recognized by the subsequent ls
commands.   I will try to look at this more this weekend - but let me
know if there is an updated version of the patchset - it will be very, very useful
when we can get this working - very exciting.

On Thu, Oct 1, 2020 at 3:50 PM Ronnie Sahlberg <lsahlber@redhat.com> wrote:
>
> Steve, List
>
> See initial implementation of a mechanism to cache the directory entries for
> a shared cache handle (shroot).
> We cache all the entries during the initial readdir() scan, using the context
> from the vfs layer as the key to handle if there are multiple concurrent readir() scans
> of the same directory.
> Then if/when we have successfully cached the entire direcotry we will server any
> subsequent readdir() from out of cache, avoinding making any query direcotry calls to the server.
>
> As with all of shroot, the cache is kept until the direcotry lease is broken.
>
>
> The first two patches are small and just a preparation for the third patch. They go as separate
> patches to make review easier.
> The third patch adds the actual meat of the dirent caching .
>
>
> For now this might not be too exciting because the only cache the root handle.
> I hope in the future we will expand the directory caching to handle any/many direcotries.
>


-- 
Thanks,

Steve

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 0/3] cifs: cache directory content for shroot
  2020-10-02  5:07 ` [PATCH 0/3] cifs: cache " Steve French
@ 2020-10-02  8:04   ` ronnie sahlberg
  2020-10-02  8:07     ` ronnie sahlberg
  0 siblings, 1 reply; 15+ messages in thread
From: ronnie sahlberg @ 2020-10-02  8:04 UTC (permalink / raw)
  To: Steve French; +Cc: Ronnie Sahlberg, linux-cifs

On Fri, Oct 2, 2020 at 3:08 PM Steve French <smfrench@gmail.com> wrote:
>
> My initial test of this was to Windows 10, doing ls of the root directory.
>
> During mount as expected I see the initial compounded smb3.1.1
> create/query(file_all_info) work and get a directory lease (read
> lease) on the root directory
>
> Then doing the ls
> I see the expected ls of the root directory query dir return
> successfully, but it is a compounded open/query-dir (no lease
> requested) not a query dir using the already open root file handle.
> We should find a way to allow it to use the root file handle if
> possible although perhaps less important when the caching code is
> fully working as it will only be requested once.

That is something we can improve as aesthetics. Yes, we should try to
use already open directory handles if any are available.
Right now this is probably low priority as we only cache the root directory.
When we expand this to other directories as well,   maybe cache with a
lease the last n directories? and not just ""
we can do this and likely see improvements.

>
> The next ls though does an open/query but then does stat of all the
> files (which is bad, lots of compounded open/query-info/close).  Then
> the next ls will do open/query-dir

I don't think we can avoid this. The directory lease AFAIK only
triggers and breaks on when the directory itself is modified,
i.e. dirents are added/deleted/renamed   but not if pre-existing
direntries have changes to their inodes.

I.e. does a directory lease break just because an existing file in it
was extended? Does the lease break if an immediate subdirectory have
files added/removed, i.e. st_nlink changes, not for the directory we
have a lease on but a subdirectory where we do not  have a lease?
>
> So the patch set is promising but currently isn't caching the root
> directory contents in a way that is recognized by the subsequent ls
> commands.   I will try to look at this more this weekend - but let me
> know if updated version of the patchset - it will be very, very useful
> when we can get this working - very exciting.
>
> On Thu, Oct 1, 2020 at 3:50 PM Ronnie Sahlberg <lsahlber@redhat.com> wrote:
> >
> > Steve, List
> >
> > See initial implementation of a mechanism to cache the directory entries for
> > a shared cache handle (shroot).
> > We cache all the entries during the initial readdir() scan, using the context
> > from the vfs layer as the key to handle if there are multiple concurrent readir() scans
> > of the same directory.
> > Then if/when we have successfully cached the entire direcotry we will server any
> > subsequent readdir() from out of cache, avoinding making any query direcotry calls to the server.
> >
> > As with all of shroot, the cache is kept until the direcotry lease is broken.
> >
> >
> > The first two patches are small and just a preparation for the third patch. They go as separate
> > patches to make review easier.
> > The third patch adds the actual meat of the dirent caching .
> >
> >
> > For now this might not be too exciting because the only cache the root handle.
> > I hope in the future we will expand the directory caching to handle any/many direcotries.
> >
>
>
> --
> Thanks,
>
> Steve

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 0/3] cifs: cache directory content for shroot
  2020-10-02  8:04   ` ronnie sahlberg
@ 2020-10-02  8:07     ` ronnie sahlberg
  2020-10-02 14:13       ` Steve French
  0 siblings, 1 reply; 15+ messages in thread
From: ronnie sahlberg @ 2020-10-02  8:07 UTC (permalink / raw)
  To: Steve French; +Cc: Ronnie Sahlberg, linux-cifs

On Fri, Oct 2, 2020 at 6:04 PM ronnie sahlberg <ronniesahlberg@gmail.com> wrote:
>
> On Fri, Oct 2, 2020 at 3:08 PM Steve French <smfrench@gmail.com> wrote:
> >
> > My initial test of this was to Windows 10, doing ls of the root directory.
> >
> > During mount as expected I see the initial compounded smb3.1.1
> > create/query(file_all_info) work and get a directory lease (read
> > lease) on the root directory
> >
> > Then doing the ls
> > I see the expected ls of the root directory query dir return
> > successfully, but it is a compounded open/query-dir (no lease
> > requested) not a query dir using the already open root file handle.
> > We should find a way to allow it to use the root file handle if
> > possible although perhaps less important when the caching code is
> > fully working as it will only be requested once.
>
> That is something we can improve as aesthetics. Yes, we should try to
> use already open directory handles if ones are available.
> Right now this is probably low priority as we only cache the root directory.
> When we expand this to other directories as well,   maybe cashe with a
> lease the last n directories? and not just ""
> we can do this and likely see improvements.
>
> >
> > The next ls though does an open/query but then does stat of all the
> > files (which is bad, lots of compounded open/query-info/close).  Then
> > the next ls will do open/query-dir
>
> I don't think we can avoid this. The directory lease AFAIK only
> triggers and breaks on when the directory itself is modified,
> i.e. dirents are added/deleted/renamed   but not is pre-existing
> direntries have changes to their inodes.
>
> I.e. does a directory lease break just because an existing file in it
> was extended? Does the lease break if an immediate subdirectory have
> files added/removed, i.e. st_nlink changes, not for the directory we
> have a lease on but a subdirectory where we do not  have a lease?

I mean, even with directory caching ls -l would still need to stat()
each dirent ?

> >
> > So the patch set is promising but currently isn't caching the root
> > directory contents in a way that is recognized by the subsequent ls
> > commands.   I will try to look at this more this weekend - but let me
> > know if updated version of the patchset - it will be very, very useful
> > when we can get this working - very exciting.
> >
> > On Thu, Oct 1, 2020 at 3:50 PM Ronnie Sahlberg <lsahlber@redhat.com> wrote:
> > >
> > > Steve, List
> > >
> > > See initial implementation of a mechanism to cache the directory entries for
> > > a shared cache handle (shroot).
> > > We cache all the entries during the initial readdir() scan, using the context
> > > from the vfs layer as the key to handle if there are multiple concurrent readir() scans
> > > of the same directory.
> > > Then if/when we have successfully cached the entire direcotry we will server any
> > > subsequent readdir() from out of cache, avoinding making any query direcotry calls to the server.
> > >
> > > As with all of shroot, the cache is kept until the direcotry lease is broken.
> > >
> > >
> > > The first two patches are small and just a preparation for the third patch. They go as separate
> > > patches to make review easier.
> > > The third patch adds the actual meat of the dirent caching .
> > >
> > >
> > > For now this might not be too exciting because the only cache the root handle.
> > > I hope in the future we will expand the directory caching to handle any/many direcotries.
> > >
> >
> >
> > --
> > Thanks,
> >
> > Steve

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 0/3] cifs: cache directory content for shroot
  2020-10-02  8:07     ` ronnie sahlberg
@ 2020-10-02 14:13       ` Steve French
  0 siblings, 0 replies; 15+ messages in thread
From: Steve French @ 2020-10-02 14:13 UTC (permalink / raw)
  To: ronnie sahlberg; +Cc: Ronnie Sahlberg, linux-cifs

query dir is doing a FIND_ID_FULL_DIRECTORY_INFO and stat is doing a
FILE_ALL_INFO?  What are we missing from the query dir response that
we would get from FILE_ALL_INFO in the query info one by one?

On Fri, Oct 2, 2020 at 3:07 AM ronnie sahlberg <ronniesahlberg@gmail.com> wrote:
>
> On Fri, Oct 2, 2020 at 6:04 PM ronnie sahlberg <ronniesahlberg@gmail.com> wrote:
> >
> > On Fri, Oct 2, 2020 at 3:08 PM Steve French <smfrench@gmail.com> wrote:
> > >
> > > My initial test of this was to Windows 10, doing ls of the root directory.
> > >
> > > During mount as expected I see the initial compounded smb3.1.1
> > > create/query(file_all_info) work and get a directory lease (read
> > > lease) on the root directory
> > >
> > > Then doing the ls
> > > I see the expected ls of the root directory query dir return
> > > successfully, but it is a compounded open/query-dir (no lease
> > > requested) not a query dir using the already open root file handle.
> > > We should find a way to allow it to use the root file handle if
> > > possible although perhaps less important when the caching code is
> > > fully working as it will only be requested once.
> >
> > That is something we can improve as aesthetics. Yes, we should try to
> > use already open directory handles if ones are available.
> > Right now this is probably low priority as we only cache the root directory.
> > When we expand this to other directories as well,   maybe cashe with a
> > lease the last n directories? and not just ""
> > we can do this and likely see improvements.
> >
> > >
> > > The next ls though does an open/query but then does stat of all the
> > > files (which is bad, lots of compounded open/query-info/close).  Then
> > > the next ls will do open/query-dir
> >
> > I don't think we can avoid this. The directory lease AFAIK only
> > triggers and breaks on when the directory itself is modified,
> > i.e. dirents are added/deleted/renamed   but not is pre-existing
> > direntries have changes to their inodes.
> >
> > I.e. does a directory lease break just because an existing file in it
> > was extended? Does the lease break if an immediate subdirectory have
> > files added/removed, i.e. st_nlink changes, not for the directory we
> > have a lease on but a subdirectory where we do not  have a lease?
>
> I mean, even with directory caching ls -l would still need to stat()
> each dirent ?
>
> > >
> > > So the patch set is promising but currently isn't caching the root
> > > directory contents in a way that is recognized by the subsequent ls
> > > commands.   I will try to look at this more this weekend - but let me
> > > know if updated version of the patchset - it will be very, very useful
> > > when we can get this working - very exciting.
> > >
> > > On Thu, Oct 1, 2020 at 3:50 PM Ronnie Sahlberg <lsahlber@redhat.com> wrote:
> > > >
> > > > Steve, List
> > > >
> > > > See initial implementation of a mechanism to cache the directory entries for
> > > > a shared cache handle (shroot).
> > > > We cache all the entries during the initial readdir() scan, using the context
> > > > from the vfs layer as the key to handle if there are multiple concurrent readir() scans
> > > > of the same directory.
> > > > Then if/when we have successfully cached the entire direcotry we will server any
> > > > subsequent readdir() from out of cache, avoinding making any query direcotry calls to the server.
> > > >
> > > > As with all of shroot, the cache is kept until the direcotry lease is broken.
> > > >
> > > >
> > > > The first two patches are small and just a preparation for the third patch. They go as separate
> > > > patches to make review easier.
> > > > The third patch adds the actual meat of the dirent caching .
> > > >
> > > >
> > > > For now this might not be too exciting because the only cache the root handle.
> > > > I hope in the future we will expand the directory caching to handle any/many direcotries.
> > > >
> > >
> > >
> > > --
> > > Thanks,
> > >
> > > Steve



-- 
Thanks,

Steve

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/3] cifs: cache the directory content for shroot
  2020-10-01 20:50 ` [PATCH 3/3] cifs: cache the directory content for shroot Ronnie Sahlberg
  2020-10-01 22:24   ` Steve French
@ 2020-10-02 15:29   ` Aurélien Aptel
  2020-10-04 23:19     ` ronnie sahlberg
  2020-10-20 10:28   ` Shyam Prasad N
  2 siblings, 1 reply; 15+ messages in thread
From: Aurélien Aptel @ 2020-10-02 15:29 UTC (permalink / raw)
  To: Ronnie Sahlberg, linux-cifs; +Cc: Steve French

Hi Ronnie,

The more we isolate code with clear opaque interfaces, the less we have
to understand about their low-level implementation.

I was thinking we could move all dir cache related code to a new dcache.c
file or something like that.

One question: when does the cache get cleared? We don't want to have the
same stale content every time we query the root. Should it be time based?

Comments on code below:

Ronnie Sahlberg <lsahlber@redhat.com> writes:
>  fs/cifs/cifsglob.h |  21 +++++++
>  fs/cifs/misc.c     |   2 +
>  fs/cifs/readdir.c  | 173 ++++++++++++++++++++++++++++++++++++++++++++++++++---
>  fs/cifs/smb2ops.c  |  14 +++++
>  4 files changed, 203 insertions(+), 7 deletions(-)


>
> diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
> index b565d83ba89e..e8f2b4a642d4 100644
> --- a/fs/cifs/cifsglob.h
> +++ b/fs/cifs/cifsglob.h
> @@ -1073,6 +1073,26 @@ cap_unix(struct cifs_ses *ses)
>  	return ses->server->vals->cap_unix & ses->capabilities;
>  }
>  
> +struct cached_dirent {
> +	struct list_head entry;
> +	char *name;
> +	int namelen;
> +	loff_t pos;
> +	u64 ino;
> +	unsigned type;
> +};
> +
> +struct cached_dirents {
> +	bool is_valid:1;
> +	bool is_failed:1;
> +	struct dir_context *ctx; /* Only used to make sure we only take entries
> +				  * from a single context. Never dereferenced.
> +				  */

This type of comment is not in the kernel coding style. First comment
line with "/*" shouldn't have text.

> +	struct mutex de_mutex;
> +	int pos;		 /* Expected ctx->pos */
> +	struct list_head entries;
> +};
> +
>  struct cached_fid {
>  	bool is_valid:1;	/* Do we have a useable root fid */
>  	bool file_all_info_is_valid:1;
> @@ -1083,6 +1103,7 @@ struct cached_fid {
>  	struct cifs_tcon *tcon;
>  	struct work_struct lease_break;
>  	struct smb2_file_all_info file_all_info;
> +	struct cached_dirents dirents;
>  };

>  	}
> +	INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries);
> +	mutex_init(&ret_buf->crfid.dirents.de_mutex);
>  
>  	atomic_inc(&tconInfoAllocCount);
>  	ret_buf->tidStatus = CifsNew;
> diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
> index 31a18aae5e64..17861c3d2e08 100644
> --- a/fs/cifs/readdir.c
> +++ b/fs/cifs/readdir.c
> @@ -811,9 +811,119 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
>  	return rc;
>  }
>  
> +static void init_cached_dirents(struct cached_dirents *cde,
> +				struct dir_context *ctx)
> +{
> +	if (ctx->pos == 2 && cde->ctx == NULL) {
> +		cde->ctx = ctx;
> +		cde->pos = 2;
> +	}
> +}

2 is for "." and ".."? Can you add more comments to document this?

Can you document which functions are expecting to be called with a lock
held?

> +
> +static bool emit_cached_dirents(struct cached_dirents *cde,
> +				struct dir_context *ctx)
> +{
> +	struct list_head *tmp;
> +	struct cached_dirent *dirent;
> +	int rc;
> +
> +	list_for_each(tmp, &cde->entries) {
> +		dirent = list_entry(tmp, struct cached_dirent, entry);
> +		if (ctx->pos >= dirent->pos)
> +			continue;
> +		ctx->pos = dirent->pos;
> +		rc = dir_emit(ctx, dirent->name, dirent->namelen,
> +			      dirent->ino, dirent->type);
> +		if (!rc)
> +			return rc;
> +	}
> +	return true;
> +}
> +
> +static void update_cached_dirents_count(struct cached_dirents *cde,
> +					struct dir_context *ctx)
> +{
> +	if (cde->ctx != ctx)
> +		return;
> +	if (cde->is_valid || cde->is_failed)
> +		return;
> +
> +	cde->pos++;
> +}
> +
> +static void finished_cached_dirents_count(struct cached_dirents *cde,
> +					struct dir_context *ctx)
> +{
> +	if (cde->ctx != ctx)
> +		return;
> +	if (cde->is_valid || cde->is_failed)
> +		return;
> +	if (ctx->pos != cde->pos)
> +		return;
> +
> +	cde->is_valid = 1;
> +}
> +
> +static void add_cached_dirent(struct cached_dirents *cde,
> +			      struct dir_context *ctx,
> +			      const char *name, int namelen,
> +			      u64 ino, unsigned type)
> +{
> +	struct cached_dirent *de;
> +
> +	if (cde->ctx != ctx)
> +		return;
> +	if (cde->is_valid || cde->is_failed)
> +		return;
> +	if (ctx->pos != cde->pos) {
> +		cde->is_failed = 1;
> +		return;
> +	}
> +	de = kzalloc(sizeof(*de), GFP_ATOMIC);
> +	if (de == NULL) {
> +		cde->is_failed = 1;
> +		return;
> +	}
> +	de->name = kzalloc(namelen, GFP_ATOMIC);
> +	if (de->name == NULL) {
> +		kfree(de);
> +		cde->is_failed = 1;
> +		return;
> +	}
> +	memcpy(de->name, name, namelen);
> +	de->namelen = namelen;
> +	de->pos = ctx->pos;
> +	de->ino = ino;
> +	de->type = type;
> +
> +	list_add_tail(&de->entry, &cde->entries);
> +}
> +
> +static bool cifs_dir_emit(struct dir_context *ctx,
> +			  const char *name, int namelen,
> +			  u64 ino, unsigned type,
> +			  struct cached_fid *cfid)
> +{
> +	bool rc;
> +
> +	rc = dir_emit(ctx, name, namelen, ino, type);
> +	if (!rc)
> +		return rc;
> +
> +	if (cfid) {
> +		mutex_lock(&cfid->dirents.de_mutex);
> +		add_cached_dirent(&cfid->dirents, ctx, name, namelen, ino,
> +				  type);
> +		mutex_unlock(&cfid->dirents.de_mutex);
> +	}
> +
> +	return rc;
> +}
> +
>  static int cifs_filldir(char *find_entry, struct file *file,
> -		struct dir_context *ctx,
> -		char *scratch_buf, unsigned int max_len)
> +			struct dir_context *ctx,
> +			char *scratch_buf, unsigned int max_len,
> +			struct cached_fid *cfid)
>  {
>  	struct cifsFileInfo *file_info = file->private_data;
>  	struct super_block *sb = file_inode(file)->i_sb;
> @@ -903,10 +1013,10 @@ static int cifs_filldir(char *find_entry, struct file *file,
>  	cifs_prime_dcache(file_dentry(file), &name, &fattr);
>  
>  	ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
> -	return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype);
> +	return !cifs_dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype,
> +			      cfid);
>  }
>  
> -
>  int cifs_readdir(struct file *file, struct dir_context *ctx)
>  {
>  	int rc = 0;
> @@ -920,6 +1030,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>  	char *end_of_smb;
>  	unsigned int max_len;
>  	char *full_path = NULL;
> +	struct cached_fid *cfid = NULL;
> +	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
>  
>  	xid = get_xid();
>  
> @@ -928,7 +1040,6 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>  		rc = -ENOMEM;
>  		goto rddir2_exit;
>  	}
> -
>  	/*
>  	 * Ensure FindFirst doesn't fail before doing filldir() for '.' and
>  	 * '..'. Otherwise we won't be able to notify VFS in case of failure.
> @@ -949,6 +1060,31 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>  		if after then keep searching till find it */
>  
>  	cifsFile = file->private_data;
> +	tcon = tlink_tcon(cifsFile->tlink);
> +	if (tcon->ses->server->vals->header_preamble_size == 0 &&

header_preamble_size == 0 is a test for smb1? we have is_smb1_server()

> +		!strcmp(full_path, "")) {
> +		rc = open_shroot(xid, tcon, cifs_sb, &cfid);
> +		if (rc)
> +			goto cache_not_found;
> +
> +		mutex_lock(&cfid->dirents.de_mutex);
> +		/* if this was reading from the start of the directory
> +		 * we might need to initialize scanning and storing the
> +		 * directory content.
> +		 */
> +		init_cached_dirents(&cfid->dirents, ctx);
> +		/* If we already have the entire directory cached then
> +		 * we cna just serve the cache.
> +		 */

comment style

> +		if (cfid->dirents.is_valid) {
> +			emit_cached_dirents(&cfid->dirents, ctx);
> +			mutex_unlock(&cfid->dirents.de_mutex);
> +			goto rddir2_exit;
> +		}
> +		mutex_unlock(&cfid->dirents.de_mutex);
> +	}
> + cache_not_found:
> +
>  	if (cifsFile->srch_inf.endOfSearch) {
>  		if (cifsFile->srch_inf.emptyDir) {
>  			cifs_dbg(FYI, "End of search, empty dir\n");
> @@ -960,15 +1096,30 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>  		tcon->ses->server->close(xid, tcon, &cifsFile->fid);
>  	} */
>  
> -	tcon = tlink_tcon(cifsFile->tlink);
> +	/* Drop the cache while calling find_cifs_entry in case there will
> +	 * be reconnects during query_directory.
> +	 */

comment style

> +	if (cfid) {
> +		close_shroot(cfid);
> +		cfid = NULL;
> +	}
>  	rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
>  			     &current_entry, &num_to_fill);
> +	if (tcon->ses->server->vals->header_preamble_size == 0 &&
> +		!strcmp(full_path, "")) {
> +		open_shroot(xid, tcon, cifs_sb, &cfid);
> +	}
>  	if (rc) {
>  		cifs_dbg(FYI, "fce error %d\n", rc);
>  		goto rddir2_exit;
>  	} else if (current_entry != NULL) {
>  		cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
>  	} else {
> +		if (cfid) {
> +			mutex_lock(&cfid->dirents.de_mutex);
> +			finished_cached_dirents_count(&cfid->dirents, ctx);
> +			mutex_unlock(&cfid->dirents.de_mutex);
> +		}
>  		cifs_dbg(FYI, "Could not find entry\n");
>  		goto rddir2_exit;
>  	}
> @@ -998,7 +1149,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>  		 */
>  		*tmp_buf = 0;
>  		rc = cifs_filldir(current_entry, file, ctx,
> -				  tmp_buf, max_len);
> +				  tmp_buf, max_len, cfid);
>  		if (rc) {
>  			if (rc > 0)
>  				rc = 0;
> @@ -1006,6 +1157,12 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>  		}
>  
>  		ctx->pos++;
> +		if (cfid) {
> +			mutex_lock(&cfid->dirents.de_mutex);
> +			update_cached_dirents_count(&cfid->dirents, ctx);
> +			mutex_unlock(&cfid->dirents.de_mutex);
> +		}
> +
>  		if (ctx->pos ==
>  			cifsFile->srch_inf.index_of_last_entry) {
>  			cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
> @@ -1020,6 +1177,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>  	kfree(tmp_buf);
>  
>  rddir2_exit:
> +	if (cfid)
> +		close_shroot(cfid);
>  	kfree(full_path);
>  	free_xid(xid);
>  	return rc;
> diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
> index fd6c136066df..280464e21a5f 100644
> --- a/fs/cifs/smb2ops.c
> +++ b/fs/cifs/smb2ops.c
> @@ -605,6 +605,8 @@ smb2_close_cached_fid(struct kref *ref)
>  {
>  	struct cached_fid *cfid = container_of(ref, struct cached_fid,
>  					       refcount);
> +	struct list_head *pos, *q;
> +	struct cached_dirent *dirent;
>  
>  	if (cfid->is_valid) {
>  		cifs_dbg(FYI, "clear cached root file handle\n");
> @@ -613,6 +615,18 @@ smb2_close_cached_fid(struct kref *ref)
>  		cfid->is_valid = false;
>  		cfid->file_all_info_is_valid = false;
>  		cfid->has_lease = false;
> +		mutex_lock(&cfid->dirents.de_mutex);
> +		list_for_each_safe(pos, q, &cfid->dirents.entries) {
> +			dirent = list_entry(pos, struct cached_dirent, entry);
> +			list_del(pos);
> +			kfree(dirent->name);
> +			kfree(dirent);
> +		}
> +		cfid->dirents.is_valid = 0;
> +		cfid->dirents.is_failed = 0;
> +		cfid->dirents.ctx = NULL;
> +		cfid->dirents.pos = 0;
> +		mutex_unlock(&cfid->dirents.de_mutex);
>  	}
>  }


Cheers,
-- 
Aurélien Aptel / SUSE Labs Samba Team
GPG: 1839 CB5F 9F5B FB9B AA97  8C99 03C8 A49B 521B D5D3
SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg, DE
GF: Felix Imendörffer, Mary Higgins, Sri Rasiah HRB 247165 (AG München)

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/3] cifs: cache the directory content for shroot
  2020-10-02 15:29   ` Aurélien Aptel
@ 2020-10-04 23:19     ` ronnie sahlberg
       [not found]       ` <CAH2r5mvijc=-JdmPMUxAUqmJKy0-x3o72NsHx+QcByBnggGXMA@mail.gmail.com>
  0 siblings, 1 reply; 15+ messages in thread
From: ronnie sahlberg @ 2020-10-04 23:19 UTC (permalink / raw)
  To: Aurélien Aptel; +Cc: Ronnie Sahlberg, linux-cifs, Steve French

Thanks Aurelien,

On Sat, Oct 3, 2020 at 1:30 AM Aurélien Aptel <aaptel@suse.com> wrote:
>
> Hi Ronnie,
>
> The more we isolate code with clear opaque interfaces, the less we have
> to understand about their low-level implementation.
>
> I was thinking we could move all dir cache related code to a new dcache.c
> file or something like that.

Let's do that as a follow-up.
I would also like to expand the caching from not just "" but, say, the
n latest directories we have used.

>
> One question: when does the cache gets cleared? We dont want to have the
> same stale content every time we query the root. Should it be time based?

Right now it still depends on the lease break.
We can add a timeout value to it as well.
Let's do that as a separate patch. I will do that once this is done.

>
> Comments on code below:
>
> Ronnie Sahlberg <lsahlber@redhat.com> writes:
> >  fs/cifs/cifsglob.h |  21 +++++++
> >  fs/cifs/misc.c     |   2 +
> >  fs/cifs/readdir.c  | 173 ++++++++++++++++++++++++++++++++++++++++++++++++++---
> >  fs/cifs/smb2ops.c  |  14 +++++
> >  4 files changed, 203 insertions(+), 7 deletions(-)
>
>
> >
> > diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
> > index b565d83ba89e..e8f2b4a642d4 100644
> > --- a/fs/cifs/cifsglob.h
> > +++ b/fs/cifs/cifsglob.h
> > @@ -1073,6 +1073,26 @@ cap_unix(struct cifs_ses *ses)
> >       return ses->server->vals->cap_unix & ses->capabilities;
> >  }
> >
> > +struct cached_dirent {
> > +     struct list_head entry;
> > +     char *name;
> > +     int namelen;
> > +     loff_t pos;
> > +     u64 ino;
> > +     unsigned type;
> > +};
> > +
> > +struct cached_dirents {
> > +     bool is_valid:1;
> > +     bool is_failed:1;
> > +     struct dir_context *ctx; /* Only used to make sure we only take entries
> > +                               * from a single context. Never dereferenced.
> > +                               */
>
> This type of comments are not in the kernel coding style. First comment
> line with "/*" shouldnt have text.

Will fix and resubmit.

>
> > +     struct mutex de_mutex;
> > +     int pos;                 /* Expected ctx->pos */
> > +     struct list_head entries;
> > +};
> > +
> >  struct cached_fid {
> >       bool is_valid:1;        /* Do we have a useable root fid */
> >       bool file_all_info_is_valid:1;
> > @@ -1083,6 +1103,7 @@ struct cached_fid {
> >       struct cifs_tcon *tcon;
> >       struct work_struct lease_break;
> >       struct smb2_file_all_info file_all_info;
> > +     struct cached_dirents dirents;
> >  };
>
> >       }
> > +     INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries);
> > +     mutex_init(&ret_buf->crfid.dirents.de_mutex);
> >
> >       atomic_inc(&tconInfoAllocCount);
> >       ret_buf->tidStatus = CifsNew;
> > diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
> > index 31a18aae5e64..17861c3d2e08 100644
> > --- a/fs/cifs/readdir.c
> > +++ b/fs/cifs/readdir.c
> > @@ -811,9 +811,119 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
> >       return rc;
> >  }
> >
> > +static void init_cached_dirents(struct cached_dirents *cde,
> > +                             struct dir_context *ctx)
> > +{
> > +     if (ctx->pos == 2 && cde->ctx == NULL) {
> > +             cde->ctx = ctx;
> > +             cde->pos = 2;
> > +     }
> > +}
>
> 2 is for "." and ".."? Can you add more comments to document this?
>
> Can you document which functions are expecting to be called with a lock
> held?

Yes, I will clarify that pos==2 is due to dir_emit_dots() and it means
we have just started a
fresh scan.

>
> > +
> > +static bool emit_cached_dirents(struct cached_dirents *cde,
> > +                             struct dir_context *ctx)
> > +{
> > +     struct list_head *tmp;
> > +     struct cached_dirent *dirent;
> > +     int rc;
> > +
> > +     list_for_each(tmp, &cde->entries) {
> > +             dirent = list_entry(tmp, struct cached_dirent, entry);
> > +             if (ctx->pos >= dirent->pos)
> > +                     continue;
> > +             ctx->pos = dirent->pos;
> > +             rc = dir_emit(ctx, dirent->name, dirent->namelen,
> > +                           dirent->ino, dirent->type);
> > +             if (!rc)
> > +                     return rc;
> > +     }
> > +     return true;
> > +}
> > +
> > +static void update_cached_dirents_count(struct cached_dirents *cde,
> > +                                     struct dir_context *ctx)
> > +{
> > +     if (cde->ctx != ctx)
> > +             return;
> > +     if (cde->is_valid || cde->is_failed)
> > +             return;
> > +
> > +     cde->pos++;
> > +}
> > +
> > +static void finished_cached_dirents_count(struct cached_dirents *cde,
> > +                                     struct dir_context *ctx)
> > +{
> > +     if (cde->ctx != ctx)
> > +             return;
> > +     if (cde->is_valid || cde->is_failed)
> > +             return;
> > +     if (ctx->pos != cde->pos)
> > +             return;
> > +
> > +     cde->is_valid = 1;
> > +}
> > +
> > +static void add_cached_dirent(struct cached_dirents *cde,
> > +                           struct dir_context *ctx,
> > +                           const char *name, int namelen,
> > +                           u64 ino, unsigned type)
> > +{
> > +     struct cached_dirent *de;
> > +
> > +     if (cde->ctx != ctx)
> > +             return;
> > +     if (cde->is_valid || cde->is_failed)
> > +             return;
> > +     if (ctx->pos != cde->pos) {
> > +             cde->is_failed = 1;
> > +             return;
> > +     }
> > +     de = kzalloc(sizeof(*de), GFP_ATOMIC);
> > +     if (de == NULL) {
> > +             cde->is_failed = 1;
> > +             return;
> > +     }
> > +     de->name = kzalloc(namelen, GFP_ATOMIC);
> > +     if (de->name == NULL) {
> > +             kfree(de);
> > +             cde->is_failed = 1;
> > +             return;
> > +     }
> > +     memcpy(de->name, name, namelen);
> > +     de->namelen = namelen;
> > +     de->pos = ctx->pos;
> > +     de->ino = ino;
> > +     de->type = type;
> > +
> > +     list_add_tail(&de->entry, &cde->entries);
> > +}
> > +
> > +static bool cifs_dir_emit(struct dir_context *ctx,
> > +                       const char *name, int namelen,
> > +                       u64 ino, unsigned type,
> > +                       struct cached_fid *cfid)
> > +{
> > +     bool rc;
> > +
> > +     rc = dir_emit(ctx, name, namelen, ino, type);
> > +     if (!rc)
> > +             return rc;
> > +
> > +     if (cfid) {
> > +             mutex_lock(&cfid->dirents.de_mutex);
> > +             add_cached_dirent(&cfid->dirents, ctx, name, namelen, ino,
> > +                               type);
> > +             mutex_unlock(&cfid->dirents.de_mutex);
> > +     }
> > +
> > +     return rc;
> > +}
> > +
> >  static int cifs_filldir(char *find_entry, struct file *file,
> > -             struct dir_context *ctx,
> > -             char *scratch_buf, unsigned int max_len)
> > +                     struct dir_context *ctx,
> > +                     char *scratch_buf, unsigned int max_len,
> > +                     struct cached_fid *cfid)
> >  {
> >       struct cifsFileInfo *file_info = file->private_data;
> >       struct super_block *sb = file_inode(file)->i_sb;
> > @@ -903,10 +1013,10 @@ static int cifs_filldir(char *find_entry, struct file *file,
> >       cifs_prime_dcache(file_dentry(file), &name, &fattr);
> >
> >       ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
> > -     return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype);
> > +     return !cifs_dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype,
> > +                           cfid);
> >  }
> >
> > -
> >  int cifs_readdir(struct file *file, struct dir_context *ctx)
> >  {
> >       int rc = 0;
> > @@ -920,6 +1030,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
> >       char *end_of_smb;
> >       unsigned int max_len;
> >       char *full_path = NULL;
> > +     struct cached_fid *cfid = NULL;
> > +     struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
> >
> >       xid = get_xid();
> >
> > @@ -928,7 +1040,6 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
> >               rc = -ENOMEM;
> >               goto rddir2_exit;
> >       }
> > -
> >       /*
> >        * Ensure FindFirst doesn't fail before doing filldir() for '.' and
> >        * '..'. Otherwise we won't be able to notify VFS in case of failure.
> > @@ -949,6 +1060,31 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
> >               if after then keep searching till find it */
> >
> >       cifsFile = file->private_data;
> > +     tcon = tlink_tcon(cifsFile->tlink);
> > +     if (tcon->ses->server->vals->header_preamble_size == 0 &&
>
> header_preamble_size == 0 is a test for smb1? we have is_smb1_server()
>
> > +             !strcmp(full_path, "")) {
> > +             rc = open_shroot(xid, tcon, cifs_sb, &cfid);
> > +             if (rc)
> > +                     goto cache_not_found;
> > +
> > +             mutex_lock(&cfid->dirents.de_mutex);
> > +             /* if this was reading from the start of the directory
> > +              * we might need to initialize scanning and storing the
> > +              * directory content.
> > +              */
> > +             init_cached_dirents(&cfid->dirents, ctx);
> > +             /* If we already have the entire directory cached then
> > +              * we can just serve the cache.
> > +              */
>
> comment style
>
> > +             if (cfid->dirents.is_valid) {
> > +                     emit_cached_dirents(&cfid->dirents, ctx);
> > +                     mutex_unlock(&cfid->dirents.de_mutex);
> > +                     goto rddir2_exit;
> > +             }
> > +             mutex_unlock(&cfid->dirents.de_mutex);
> > +     }
> > + cache_not_found:
> > +
> >       if (cifsFile->srch_inf.endOfSearch) {
> >               if (cifsFile->srch_inf.emptyDir) {
> >                       cifs_dbg(FYI, "End of search, empty dir\n");
> > @@ -960,15 +1096,30 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
> >               tcon->ses->server->close(xid, tcon, &cifsFile->fid);
> >       } */
> >
> > -     tcon = tlink_tcon(cifsFile->tlink);
> > +     /* Drop the cache while calling find_cifs_entry in case there will
> > +      * be reconnects during query_directory.
> > +      */
>
> comment style
>
> > +     if (cfid) {
> > +             close_shroot(cfid);
> > +             cfid = NULL;
> > +     }
> >       rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
> >                            &current_entry, &num_to_fill);
> > +     if (tcon->ses->server->vals->header_preamble_size == 0 &&
> > +             !strcmp(full_path, "")) {
> > +             open_shroot(xid, tcon, cifs_sb, &cfid);
> > +     }
> >       if (rc) {
> >               cifs_dbg(FYI, "fce error %d\n", rc);
> >               goto rddir2_exit;
> >       } else if (current_entry != NULL) {
> >               cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
> >       } else {
> > +             if (cfid) {
> > +                     mutex_lock(&cfid->dirents.de_mutex);
> > +                     finished_cached_dirents_count(&cfid->dirents, ctx);
> > +                     mutex_unlock(&cfid->dirents.de_mutex);
> > +             }
> >               cifs_dbg(FYI, "Could not find entry\n");
> >               goto rddir2_exit;
> >       }
> > @@ -998,7 +1149,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
> >                */
> >               *tmp_buf = 0;
> >               rc = cifs_filldir(current_entry, file, ctx,
> > -                               tmp_buf, max_len);
> > +                               tmp_buf, max_len, cfid);
> >               if (rc) {
> >                       if (rc > 0)
> >                               rc = 0;
> > @@ -1006,6 +1157,12 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
> >               }
> >
> >               ctx->pos++;
> > +             if (cfid) {
> > +                     mutex_lock(&cfid->dirents.de_mutex);
> > +                     update_cached_dirents_count(&cfid->dirents, ctx);
> > +                     mutex_unlock(&cfid->dirents.de_mutex);
> > +             }
> > +
> >               if (ctx->pos ==
> >                       cifsFile->srch_inf.index_of_last_entry) {
> >                       cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
> > @@ -1020,6 +1177,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
> >       kfree(tmp_buf);
> >
> >  rddir2_exit:
> > +     if (cfid)
> > +             close_shroot(cfid);
> >       kfree(full_path);
> >       free_xid(xid);
> >       return rc;
> > diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
> > index fd6c136066df..280464e21a5f 100644
> > --- a/fs/cifs/smb2ops.c
> > +++ b/fs/cifs/smb2ops.c
> > @@ -605,6 +605,8 @@ smb2_close_cached_fid(struct kref *ref)
> >  {
> >       struct cached_fid *cfid = container_of(ref, struct cached_fid,
> >                                              refcount);
> > +     struct list_head *pos, *q;
> > +     struct cached_dirent *dirent;
> >
> >       if (cfid->is_valid) {
> >               cifs_dbg(FYI, "clear cached root file handle\n");
> > @@ -613,6 +615,18 @@ smb2_close_cached_fid(struct kref *ref)
> >               cfid->is_valid = false;
> >               cfid->file_all_info_is_valid = false;
> >               cfid->has_lease = false;
> > +             mutex_lock(&cfid->dirents.de_mutex);
> > +             list_for_each_safe(pos, q, &cfid->dirents.entries) {
> > +                     dirent = list_entry(pos, struct cached_dirent, entry);
> > +                     list_del(pos);
> > +                     kfree(dirent->name);
> > +                     kfree(dirent);
> > +             }
> > +             cfid->dirents.is_valid = 0;
> > +             cfid->dirents.is_failed = 0;
> > +             cfid->dirents.ctx = NULL;
> > +             cfid->dirents.pos = 0;
> > +             mutex_unlock(&cfid->dirents.de_mutex);
> >       }
> >  }
>
>
> Cheers,
> --
> Aurélien Aptel / SUSE Labs Samba Team
> GPG: 1839 CB5F 9F5B FB9B AA97  8C99 03C8 A49B 521B D5D3
> SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg, DE
> GF: Felix Imendörffer, Mary Higgins, Sri Rasiah HRB 247165 (AG München)

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/3] cifs: cache the directory content for shroot
       [not found]       ` <CAH2r5mvijc=-JdmPMUxAUqmJKy0-x3o72NsHx+QcByBnggGXMA@mail.gmail.com>
@ 2020-10-05  0:20         ` ronnie sahlberg
  0 siblings, 0 replies; 15+ messages in thread
From: ronnie sahlberg @ 2020-10-05  0:20 UTC (permalink / raw)
  To: Steve French; +Cc: Aurélien Aptel, Ronnie Sahlberg, linux-cifs

On Mon, Oct 5, 2020 at 9:47 AM Steve French <smfrench@gmail.com> wrote:
>
> How do we get linkcount? That seems to be the only thing missing from query dir vs query file info

Do you mean the link-count for subdirectories/files that we cache the
dirent for?
I don't think we can cache that.
If we have a lease on "/" and we have an already existing subdirectory
"/foo" in our dirent cache.
If/when the link count for /foo changes, this does not trigger a lease
break for "/"

For this I think we can probably only cache
* name of object
* type of object
* inode number of object

Changes to any of these are probably the only thing that is guaranteed
to give us a lease break and thus tell us we need to refresh.



>
> On Sun, Oct 4, 2020, 17:20 ronnie sahlberg <ronniesahlberg@gmail.com> wrote:
>>
>> Thanks Aurelien,
>>
>> On Sat, Oct 3, 2020 at 1:30 AM Aurélien Aptel <aaptel@suse.com> wrote:
>> >
>> > Hi Ronnie,
>> >
>> > The more we isolate code with clear opaque interfaces, the less we have
>> > to understand about their low-level implementation.
>> >
>> > I was thinking we could move all dir cache related code to a new dcache.c
>> > file or something like that.
>>
>> Lets do that as a follow up.
>> I would also like to expand the caching from not just "" but, say, the
>> n latest directories we have used.
>>
>> >
>> > One question: when does the cache gets cleared? We dont want to have the
>> > same stale content every time we query the root. Should it be time based?
>>
>> Right now it still depends on the lease break.
>> We can add a timeout value to it as well.
>> Let's do that as a separate patch. I will do that once this is done.
>>
>> >
>> > Comments on code below:
>> >
>> > Ronnie Sahlberg <lsahlber@redhat.com> writes:
>> > >  fs/cifs/cifsglob.h |  21 +++++++
>> > >  fs/cifs/misc.c     |   2 +
>> > >  fs/cifs/readdir.c  | 173 ++++++++++++++++++++++++++++++++++++++++++++++++++---
>> > >  fs/cifs/smb2ops.c  |  14 +++++
>> > >  4 files changed, 203 insertions(+), 7 deletions(-)
>> >
>> >
>> > >
>> > > diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
>> > > index b565d83ba89e..e8f2b4a642d4 100644
>> > > --- a/fs/cifs/cifsglob.h
>> > > +++ b/fs/cifs/cifsglob.h
>> > > @@ -1073,6 +1073,26 @@ cap_unix(struct cifs_ses *ses)
>> > >       return ses->server->vals->cap_unix & ses->capabilities;
>> > >  }
>> > >
>> > > +struct cached_dirent {
>> > > +     struct list_head entry;
>> > > +     char *name;
>> > > +     int namelen;
>> > > +     loff_t pos;
>> > > +     u64 ino;
>> > > +     unsigned type;
>> > > +};
>> > > +
>> > > +struct cached_dirents {
>> > > +     bool is_valid:1;
>> > > +     bool is_failed:1;
>> > > +     struct dir_context *ctx; /* Only used to make sure we only take entries
>> > > +                               * from a single context. Never dereferenced.
>> > > +                               */
>> >
>> > This type of comments are not in the kernel coding style. First comment
>> > line with "/*" shouldnt have text.
>>
>> Will fix and resubmit.
>>
>> >
>> > > +     struct mutex de_mutex;
>> > > +     int pos;                 /* Expected ctx->pos */
>> > > +     struct list_head entries;
>> > > +};
>> > > +
>> > >  struct cached_fid {
>> > >       bool is_valid:1;        /* Do we have a useable root fid */
>> > >       bool file_all_info_is_valid:1;
>> > > @@ -1083,6 +1103,7 @@ struct cached_fid {
>> > >       struct cifs_tcon *tcon;
>> > >       struct work_struct lease_break;
>> > >       struct smb2_file_all_info file_all_info;
>> > > +     struct cached_dirents dirents;
>> > >  };
>> >
>> > >       }
>> > > +     INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries);
>> > > +     mutex_init(&ret_buf->crfid.dirents.de_mutex);
>> > >
>> > >       atomic_inc(&tconInfoAllocCount);
>> > >       ret_buf->tidStatus = CifsNew;
>> > > diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
>> > > index 31a18aae5e64..17861c3d2e08 100644
>> > > --- a/fs/cifs/readdir.c
>> > > +++ b/fs/cifs/readdir.c
>> > > @@ -811,9 +811,119 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
>> > >       return rc;
>> > >  }
>> > >
>> > > +static void init_cached_dirents(struct cached_dirents *cde,
>> > > +                             struct dir_context *ctx)
>> > > +{
>> > > +     if (ctx->pos == 2 && cde->ctx == NULL) {
>> > > +             cde->ctx = ctx;
>> > > +             cde->pos = 2;
>> > > +     }
>> > > +}
>> >
>> > 2 is for "." and ".."? Can you add more comments to document this?
>> >
>> > Can you document which functions are expecting to be called with a lock
>> > held?
>>
>> Yes, I will clarify that pos==2 is due to dir_emit_dots() and it means
>> we have just started a
>> fresh scan.
>>
>> >
>> > > +
>> > > +static bool emit_cached_dirents(struct cached_dirents *cde,
>> > > +                             struct dir_context *ctx)
>> > > +{
>> > > +     struct list_head *tmp;
>> > > +     struct cached_dirent *dirent;
>> > > +     int rc;
>> > > +
>> > > +     list_for_each(tmp, &cde->entries) {
>> > > +             dirent = list_entry(tmp, struct cached_dirent, entry);
>> > > +             if (ctx->pos >= dirent->pos)
>> > > +                     continue;
>> > > +             ctx->pos = dirent->pos;
>> > > +             rc = dir_emit(ctx, dirent->name, dirent->namelen,
>> > > +                           dirent->ino, dirent->type);
>> > > +             if (!rc)
>> > > +                     return rc;
>> > > +     }
>> > > +     return true;
>> > > +}
>> > > +
>> > > +static void update_cached_dirents_count(struct cached_dirents *cde,
>> > > +                                     struct dir_context *ctx)
>> > > +{
>> > > +     if (cde->ctx != ctx)
>> > > +             return;
>> > > +     if (cde->is_valid || cde->is_failed)
>> > > +             return;
>> > > +
>> > > +     cde->pos++;
>> > > +}
>> > > +
>> > > +static void finished_cached_dirents_count(struct cached_dirents *cde,
>> > > +                                     struct dir_context *ctx)
>> > > +{
>> > > +     if (cde->ctx != ctx)
>> > > +             return;
>> > > +     if (cde->is_valid || cde->is_failed)
>> > > +             return;
>> > > +     if (ctx->pos != cde->pos)
>> > > +             return;
>> > > +
>> > > +     cde->is_valid = 1;
>> > > +}
>> > > +
>> > > +static void add_cached_dirent(struct cached_dirents *cde,
>> > > +                           struct dir_context *ctx,
>> > > +                           const char *name, int namelen,
>> > > +                           u64 ino, unsigned type)
>> > > +{
>> > > +     struct cached_dirent *de;
>> > > +
>> > > +     if (cde->ctx != ctx)
>> > > +             return;
>> > > +     if (cde->is_valid || cde->is_failed)
>> > > +             return;
>> > > +     if (ctx->pos != cde->pos) {
>> > > +             cde->is_failed = 1;
>> > > +             return;
>> > > +     }
>> > > +     de = kzalloc(sizeof(*de), GFP_ATOMIC);
>> > > +     if (de == NULL) {
>> > > +             cde->is_failed = 1;
>> > > +             return;
>> > > +     }
>> > > +     de->name = kzalloc(namelen, GFP_ATOMIC);
>> > > +     if (de->name == NULL) {
>> > > +             kfree(de);
>> > > +             cde->is_failed = 1;
>> > > +             return;
>> > > +     }
>> > > +     memcpy(de->name, name, namelen);
>> > > +     de->namelen = namelen;
>> > > +     de->pos = ctx->pos;
>> > > +     de->ino = ino;
>> > > +     de->type = type;
>> > > +
>> > > +     list_add_tail(&de->entry, &cde->entries);
>> > > +}
>> > > +
>> > > +static bool cifs_dir_emit(struct dir_context *ctx,
>> > > +                       const char *name, int namelen,
>> > > +                       u64 ino, unsigned type,
>> > > +                       struct cached_fid *cfid)
>> > > +{
>> > > +     bool rc;
>> > > +
>> > > +     rc = dir_emit(ctx, name, namelen, ino, type);
>> > > +     if (!rc)
>> > > +             return rc;
>> > > +
>> > > +     if (cfid) {
>> > > +             mutex_lock(&cfid->dirents.de_mutex);
>> > > +             add_cached_dirent(&cfid->dirents, ctx, name, namelen, ino,
>> > > +                               type);
>> > > +             mutex_unlock(&cfid->dirents.de_mutex);
>> > > +     }
>> > > +
>> > > +     return rc;
>> > > +}
>> > > +
>> > >  static int cifs_filldir(char *find_entry, struct file *file,
>> > > -             struct dir_context *ctx,
>> > > -             char *scratch_buf, unsigned int max_len)
>> > > +                     struct dir_context *ctx,
>> > > +                     char *scratch_buf, unsigned int max_len,
>> > > +                     struct cached_fid *cfid)
>> > >  {
>> > >       struct cifsFileInfo *file_info = file->private_data;
>> > >       struct super_block *sb = file_inode(file)->i_sb;
>> > > @@ -903,10 +1013,10 @@ static int cifs_filldir(char *find_entry, struct file *file,
>> > >       cifs_prime_dcache(file_dentry(file), &name, &fattr);
>> > >
>> > >       ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
>> > > -     return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype);
>> > > +     return !cifs_dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype,
>> > > +                           cfid);
>> > >  }
>> > >
>> > > -
>> > >  int cifs_readdir(struct file *file, struct dir_context *ctx)
>> > >  {
>> > >       int rc = 0;
>> > > @@ -920,6 +1030,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>> > >       char *end_of_smb;
>> > >       unsigned int max_len;
>> > >       char *full_path = NULL;
>> > > +     struct cached_fid *cfid = NULL;
>> > > +     struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
>> > >
>> > >       xid = get_xid();
>> > >
>> > > @@ -928,7 +1040,6 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>> > >               rc = -ENOMEM;
>> > >               goto rddir2_exit;
>> > >       }
>> > > -
>> > >       /*
>> > >        * Ensure FindFirst doesn't fail before doing filldir() for '.' and
>> > >        * '..'. Otherwise we won't be able to notify VFS in case of failure.
>> > > @@ -949,6 +1060,31 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>> > >               if after then keep searching till find it */
>> > >
>> > >       cifsFile = file->private_data;
>> > > +     tcon = tlink_tcon(cifsFile->tlink);
>> > > +     if (tcon->ses->server->vals->header_preamble_size == 0 &&
>> >
>> > header_preamble_size == 0 is a test for smb1? we have is_smb1_server()
>> >
>> > > +             !strcmp(full_path, "")) {
>> > > +             rc = open_shroot(xid, tcon, cifs_sb, &cfid);
>> > > +             if (rc)
>> > > +                     goto cache_not_found;
>> > > +
>> > > +             mutex_lock(&cfid->dirents.de_mutex);
>> > > +             /* if this was reading from the start of the directory
>> > > +              * we might need to initialize scanning and storing the
>> > > +              * directory content.
>> > > +              */
>> > > +             init_cached_dirents(&cfid->dirents, ctx);
>> > > +             /* If we already have the entire directory cached then
>> > > +              * we can just serve the cache.
>> > > +              */
>> >
>> > comment style
>> >
>> > > +             if (cfid->dirents.is_valid) {
>> > > +                     emit_cached_dirents(&cfid->dirents, ctx);
>> > > +                     mutex_unlock(&cfid->dirents.de_mutex);
>> > > +                     goto rddir2_exit;
>> > > +             }
>> > > +             mutex_unlock(&cfid->dirents.de_mutex);
>> > > +     }
>> > > + cache_not_found:
>> > > +
>> > >       if (cifsFile->srch_inf.endOfSearch) {
>> > >               if (cifsFile->srch_inf.emptyDir) {
>> > >                       cifs_dbg(FYI, "End of search, empty dir\n");
>> > > @@ -960,15 +1096,30 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>> > >               tcon->ses->server->close(xid, tcon, &cifsFile->fid);
>> > >       } */
>> > >
>> > > -     tcon = tlink_tcon(cifsFile->tlink);
>> > > +     /* Drop the cache while calling find_cifs_entry in case there will
>> > > +      * be reconnects during query_directory.
>> > > +      */
>> >
>> > comment style
>> >
>> > > +     if (cfid) {
>> > > +             close_shroot(cfid);
>> > > +             cfid = NULL;
>> > > +     }
>> > >       rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
>> > >                            &current_entry, &num_to_fill);
>> > > +     if (tcon->ses->server->vals->header_preamble_size == 0 &&
>> > > +             !strcmp(full_path, "")) {
>> > > +             open_shroot(xid, tcon, cifs_sb, &cfid);
>> > > +     }
>> > >       if (rc) {
>> > >               cifs_dbg(FYI, "fce error %d\n", rc);
>> > >               goto rddir2_exit;
>> > >       } else if (current_entry != NULL) {
>> > >               cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
>> > >       } else {
>> > > +             if (cfid) {
>> > > +                     mutex_lock(&cfid->dirents.de_mutex);
>> > > +                     finished_cached_dirents_count(&cfid->dirents, ctx);
>> > > +                     mutex_unlock(&cfid->dirents.de_mutex);
>> > > +             }
>> > >               cifs_dbg(FYI, "Could not find entry\n");
>> > >               goto rddir2_exit;
>> > >       }
>> > > @@ -998,7 +1149,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>> > >                */
>> > >               *tmp_buf = 0;
>> > >               rc = cifs_filldir(current_entry, file, ctx,
>> > > -                               tmp_buf, max_len);
>> > > +                               tmp_buf, max_len, cfid);
>> > >               if (rc) {
>> > >                       if (rc > 0)
>> > >                               rc = 0;
>> > > @@ -1006,6 +1157,12 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>> > >               }
>> > >
>> > >               ctx->pos++;
>> > > +             if (cfid) {
>> > > +                     mutex_lock(&cfid->dirents.de_mutex);
>> > > +                     update_cached_dirents_count(&cfid->dirents, ctx);
>> > > +                     mutex_unlock(&cfid->dirents.de_mutex);
>> > > +             }
>> > > +
>> > >               if (ctx->pos ==
>> > >                       cifsFile->srch_inf.index_of_last_entry) {
>> > >                       cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
>> > > @@ -1020,6 +1177,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>> > >       kfree(tmp_buf);
>> > >
>> > >  rddir2_exit:
>> > > +     if (cfid)
>> > > +             close_shroot(cfid);
>> > >       kfree(full_path);
>> > >       free_xid(xid);
>> > >       return rc;
>> > > diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
>> > > index fd6c136066df..280464e21a5f 100644
>> > > --- a/fs/cifs/smb2ops.c
>> > > +++ b/fs/cifs/smb2ops.c
>> > > @@ -605,6 +605,8 @@ smb2_close_cached_fid(struct kref *ref)
>> > >  {
>> > >       struct cached_fid *cfid = container_of(ref, struct cached_fid,
>> > >                                              refcount);
>> > > +     struct list_head *pos, *q;
>> > > +     struct cached_dirent *dirent;
>> > >
>> > >       if (cfid->is_valid) {
>> > >               cifs_dbg(FYI, "clear cached root file handle\n");
>> > > @@ -613,6 +615,18 @@ smb2_close_cached_fid(struct kref *ref)
>> > >               cfid->is_valid = false;
>> > >               cfid->file_all_info_is_valid = false;
>> > >               cfid->has_lease = false;
>> > > +             mutex_lock(&cfid->dirents.de_mutex);
>> > > +             list_for_each_safe(pos, q, &cfid->dirents.entries) {
>> > > +                     dirent = list_entry(pos, struct cached_dirent, entry);
>> > > +                     list_del(pos);
>> > > +                     kfree(dirent->name);
>> > > +                     kfree(dirent);
>> > > +             }
>> > > +             cfid->dirents.is_valid = 0;
>> > > +             cfid->dirents.is_failed = 0;
>> > > +             cfid->dirents.ctx = NULL;
>> > > +             cfid->dirents.pos = 0;
>> > > +             mutex_unlock(&cfid->dirents.de_mutex);
>> > >       }
>> > >  }
>> >
>> >
>> > Cheers,
>> > --
>> > Aurélien Aptel / SUSE Labs Samba Team
>> > GPG: 1839 CB5F 9F5B FB9B AA97  8C99 03C8 A49B 521B D5D3
>> > SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg, DE
>> > GF: Felix Imendörffer, Mary Higgins, Sri Rasiah HRB 247165 (AG München)

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/3] cifs: cache the directory content for shroot
  2020-10-01 20:50 ` [PATCH 3/3] cifs: cache the directory content for shroot Ronnie Sahlberg
  2020-10-01 22:24   ` Steve French
  2020-10-02 15:29   ` Aurélien Aptel
@ 2020-10-20 10:28   ` Shyam Prasad N
  2 siblings, 0 replies; 15+ messages in thread
From: Shyam Prasad N @ 2020-10-20 10:28 UTC (permalink / raw)
  To: Ronnie Sahlberg; +Cc: linux-cifs, Steve French

Please read my comments inline...

On Fri, Oct 2, 2020 at 2:21 AM Ronnie Sahlberg <lsahlber@redhat.com> wrote:
>
> Add caching of the directory content for the shroot.
> Populate the cache during the initial scan in readdir()
> and then try to serve out of the cache for subsequent scans.
>
> Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
> ---
>  fs/cifs/cifsglob.h |  21 +++++++
>  fs/cifs/misc.c     |   2 +
>  fs/cifs/readdir.c  | 173 ++++++++++++++++++++++++++++++++++++++++++++++++++---
>  fs/cifs/smb2ops.c  |  14 +++++
>  4 files changed, 203 insertions(+), 7 deletions(-)
>
> diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
> index b565d83ba89e..e8f2b4a642d4 100644
> --- a/fs/cifs/cifsglob.h
> +++ b/fs/cifs/cifsglob.h
> @@ -1073,6 +1073,26 @@ cap_unix(struct cifs_ses *ses)
>         return ses->server->vals->cap_unix & ses->capabilities;
>  }
>
> +struct cached_dirent {
> +       struct list_head entry;
> +       char *name;
> +       int namelen;
> +       loff_t pos;
> +       u64 ino;
> +       unsigned type;
> +};
> +
> +struct cached_dirents {
> +       bool is_valid:1;
> +       bool is_failed:1;
> +       struct dir_context *ctx; /* Only used to make sure we only take entries
> +                                 * from a single context. Never dereferenced.
> +                                 */
Since this is an opaque value, can we save this as a non-pointer or
void * so that any references in the future will emit compiler errors?

> +       struct mutex de_mutex;
> +       int pos;                 /* Expected ctx->pos */
> +       struct list_head entries;
> +};
> +
>  struct cached_fid {
>         bool is_valid:1;        /* Do we have a useable root fid */
>         bool file_all_info_is_valid:1;
> @@ -1083,6 +1103,7 @@ struct cached_fid {
>         struct cifs_tcon *tcon;
>         struct work_struct lease_break;
>         struct smb2_file_all_info file_all_info;
> +       struct cached_dirents dirents;
>  };
>
>  /*
> diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
> index 1c14cf01dbef..a106bd3cc20b 100644
> --- a/fs/cifs/misc.c
> +++ b/fs/cifs/misc.c
> @@ -124,6 +124,8 @@ tconInfoAlloc(void)
>                 kfree(ret_buf);
>                 return NULL;
>         }
> +       INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries);
> +       mutex_init(&ret_buf->crfid.dirents.de_mutex);
>
>         atomic_inc(&tconInfoAllocCount);
>         ret_buf->tidStatus = CifsNew;
> diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
> index 31a18aae5e64..17861c3d2e08 100644
> --- a/fs/cifs/readdir.c
> +++ b/fs/cifs/readdir.c
> @@ -811,9 +811,119 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
>         return rc;
>  }
>
> +static void init_cached_dirents(struct cached_dirents *cde,
> +                               struct dir_context *ctx)
> +{
> +       if (ctx->pos == 2 && cde->ctx == NULL) {
> +               cde->ctx = ctx;
> +               cde->pos = 2;
Is this needed? pos is already 2.

> +       }
> +}
> +
> +static bool emit_cached_dirents(struct cached_dirents *cde,
> +                               struct dir_context *ctx)
> +{
> +       struct list_head *tmp;
> +       struct cached_dirent *dirent;
> +       int rc;
> +
> +       list_for_each(tmp, &cde->entries) {
If the directory is large, can this loop take a long time?
Maybe okay for now, but perhaps something to think about?

> +               dirent = list_entry(tmp, struct cached_dirent, entry);
> +               if (ctx->pos >= dirent->pos)
> +                       continue;
> +               ctx->pos = dirent->pos;
> +               rc = dir_emit(ctx, dirent->name, dirent->namelen,
> +                             dirent->ino, dirent->type);
> +               if (!rc)
> +                       return rc;
> +       }
> +       return true;
> +}
> +
> +static void update_cached_dirents_count(struct cached_dirents *cde,
> +                                       struct dir_context *ctx)
> +{
> +       if (cde->ctx != ctx)
> +               return;
> +       if (cde->is_valid || cde->is_failed)
> +               return;
> +
> +       cde->pos++;
> +}
> +
> +static void finished_cached_dirents_count(struct cached_dirents *cde,
> +                                       struct dir_context *ctx)
> +{
> +       if (cde->ctx != ctx)
> +               return;
> +       if (cde->is_valid || cde->is_failed)
> +               return;
> +       if (ctx->pos != cde->pos)
> +               return;
> +
> +       cde->is_valid = 1;
> +}
> +
> +static void add_cached_dirent(struct cached_dirents *cde,
> +                             struct dir_context *ctx,
> +                             const char *name, int namelen,
> +                             u64 ino, unsigned type)
> +{
> +       struct cached_dirent *de;
> +
> +       if (cde->ctx != ctx)
> +               return;
> +       if (cde->is_valid || cde->is_failed)
> +               return;
> +       if (ctx->pos != cde->pos) {
> +               cde->is_failed = 1;
> +               return;
> +       }
> +       de = kzalloc(sizeof(*de), GFP_ATOMIC);
> +       if (de == NULL) {
> +               cde->is_failed = 1;
> +               return;
> +       }
> +       de->name = kzalloc(namelen, GFP_ATOMIC);
> +       if (de->name == NULL) {
> +               kfree(de);
> +               cde->is_failed = 1;
> +               return;
> +       }
> +       memcpy(de->name, name, namelen);
> +       de->namelen = namelen;
> +       de->pos = ctx->pos;
> +       de->ino = ino;
> +       de->type = type;
> +
> +       list_add_tail(&de->entry, &cde->entries);
> +}
> +
> +static bool cifs_dir_emit(struct dir_context *ctx,
> +                         const char *name, int namelen,
> +                         u64 ino, unsigned type,
> +                         struct cached_fid *cfid)
> +{
> +       bool rc;
> +
> +       rc = dir_emit(ctx, name, namelen, ino, type);
> +       if (!rc)
> +               return rc;
> +
> +       if (cfid) {
> +               mutex_lock(&cfid->dirents.de_mutex);
> +               add_cached_dirent(&cfid->dirents, ctx, name, namelen, ino,
> +                                 type);
> +               mutex_unlock(&cfid->dirents.de_mutex);
> +       }
> +
> +       return rc;
> +}
> +
>  static int cifs_filldir(char *find_entry, struct file *file,
> -               struct dir_context *ctx,
> -               char *scratch_buf, unsigned int max_len)
> +                       struct dir_context *ctx,
> +                       char *scratch_buf, unsigned int max_len,
> +                       struct cached_fid *cfid)
>  {
>         struct cifsFileInfo *file_info = file->private_data;
>         struct super_block *sb = file_inode(file)->i_sb;
> @@ -903,10 +1013,10 @@ static int cifs_filldir(char *find_entry, struct file *file,
>         cifs_prime_dcache(file_dentry(file), &name, &fattr);
>
>         ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
> -       return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype);
> +       return !cifs_dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype,
> +                             cfid);
>  }
>
> -
>  int cifs_readdir(struct file *file, struct dir_context *ctx)
>  {
>         int rc = 0;
> @@ -920,6 +1030,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>         char *end_of_smb;
>         unsigned int max_len;
>         char *full_path = NULL;
> +       struct cached_fid *cfid = NULL;
> +       struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
>
>         xid = get_xid();
>
> @@ -928,7 +1040,6 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>                 rc = -ENOMEM;
>                 goto rddir2_exit;
>         }
> -
>         /*
>          * Ensure FindFirst doesn't fail before doing filldir() for '.' and
>          * '..'. Otherwise we won't be able to notify VFS in case of failure.
> @@ -949,6 +1060,31 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>                 if after then keep searching till find it */
>
>         cifsFile = file->private_data;
> +       tcon = tlink_tcon(cifsFile->tlink);
> +       if (tcon->ses->server->vals->header_preamble_size == 0 &&
> +               !strcmp(full_path, "")) {
> +               rc = open_shroot(xid, tcon, cifs_sb, &cfid);
> +               if (rc)
> +                       goto cache_not_found;
> +
> +               mutex_lock(&cfid->dirents.de_mutex);
> +               /* if this was reading from the start of the directory
> +                * we might need to initialize scanning and storing the
> +                * directory content.
> +                */
> +               init_cached_dirents(&cfid->dirents, ctx);
If ctx passed is different from the ctx in cfid->dirents, should we
skip serving from cache?
Do we need to invalidate the cached dentries in that case?
Also, do we need to check if cifsFile->invalidHandle?

> +               /* If we already have the entire directory cached then
> +                * we cna just serve the cache.
> +                */
Typo: cna -> can

> +               if (cfid->dirents.is_valid) {
Should we also check if is_dir_changed(file)? If the dir has changed,
we should invalidate the cache rather than serve from it.

> +                       emit_cached_dirents(&cfid->dirents, ctx);
> +                       mutex_unlock(&cfid->dirents.de_mutex);
> +                       goto rddir2_exit;
What if we can serve only a portion of the request from the cache? Can
there still be entries which we have not served from cache?
I think we'd still be okay, since the user would call readdir again
with pos updated. But can we not go to rddir2_exit and proceed here?
Since ctx->pos would be updated anyway, I think we don't need to end here.

> +               }
> +               mutex_unlock(&cfid->dirents.de_mutex);
> +       }
> + cache_not_found:
> +
>         if (cifsFile->srch_inf.endOfSearch) {
>                 if (cifsFile->srch_inf.emptyDir) {
>                         cifs_dbg(FYI, "End of search, empty dir\n");
> @@ -960,15 +1096,30 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>                 tcon->ses->server->close(xid, tcon, &cifsFile->fid);
>         } */
>
> -       tcon = tlink_tcon(cifsFile->tlink);
> +       /* Drop the cache while calling find_cifs_entry in case there will
> +        * be reconnects during query_directory.
> +        */
> +       if (cfid) {
> +               close_shroot(cfid);
> +               cfid = NULL;
> +       }
>         rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
>                              &current_entry, &num_to_fill);
> +       if (tcon->ses->server->vals->header_preamble_size == 0 &&
> +               !strcmp(full_path, "")) {
> +               open_shroot(xid, tcon, cifs_sb, &cfid);
> +       }
>         if (rc) {
>                 cifs_dbg(FYI, "fce error %d\n", rc);
>                 goto rddir2_exit;
>         } else if (current_entry != NULL) {
>                 cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
>         } else {
> +               if (cfid) {
> +                       mutex_lock(&cfid->dirents.de_mutex);
> +                       finished_cached_dirents_count(&cfid->dirents, ctx);
> +                       mutex_unlock(&cfid->dirents.de_mutex);
> +               }
>                 cifs_dbg(FYI, "Could not find entry\n");
>                 goto rddir2_exit;
>         }
> @@ -998,7 +1149,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>                  */
>                 *tmp_buf = 0;
>                 rc = cifs_filldir(current_entry, file, ctx,
> -                                 tmp_buf, max_len);
> +                                 tmp_buf, max_len, cfid);
>                 if (rc) {
>                         if (rc > 0)
>                                 rc = 0;
> @@ -1006,6 +1157,12 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>                 }
>
>                 ctx->pos++;
> +               if (cfid) {
> +                       mutex_lock(&cfid->dirents.de_mutex);
> +                       update_cached_dirents_count(&cfid->dirents, ctx);
> +                       mutex_unlock(&cfid->dirents.de_mutex);
> +               }
> +
>                 if (ctx->pos ==
>                         cifsFile->srch_inf.index_of_last_entry) {
>                         cifs_dbg(FYI, "last entry in buf at pos %lld %s\n",
> @@ -1020,6 +1177,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
>         kfree(tmp_buf);
>
>  rddir2_exit:
> +       if (cfid)
> +               close_shroot(cfid);
>         kfree(full_path);
>         free_xid(xid);
>         return rc;
> diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
> index fd6c136066df..280464e21a5f 100644
> --- a/fs/cifs/smb2ops.c
> +++ b/fs/cifs/smb2ops.c
> @@ -605,6 +605,8 @@ smb2_close_cached_fid(struct kref *ref)
>  {
>         struct cached_fid *cfid = container_of(ref, struct cached_fid,
>                                                refcount);
> +       struct list_head *pos, *q;
> +       struct cached_dirent *dirent;
>
>         if (cfid->is_valid) {
>                 cifs_dbg(FYI, "clear cached root file handle\n");
> @@ -613,6 +615,18 @@ smb2_close_cached_fid(struct kref *ref)
>                 cfid->is_valid = false;
>                 cfid->file_all_info_is_valid = false;
>                 cfid->has_lease = false;
> +               mutex_lock(&cfid->dirents.de_mutex);
> +               list_for_each_safe(pos, q, &cfid->dirents.entries) {
> +                       dirent = list_entry(pos, struct cached_dirent, entry);
> +                       list_del(pos);
> +                       kfree(dirent->name);
> +                       kfree(dirent);
> +               }
> +               cfid->dirents.is_valid = 0;
> +               cfid->dirents.is_failed = 0;
> +               cfid->dirents.ctx = NULL;
> +               cfid->dirents.pos = 0;
> +               mutex_unlock(&cfid->dirents.de_mutex);
>         }
>  }
>
> --
> 2.13.6
>

Also, does it make sense to keep a mount option to enable/disable
directory caching till this feature is stabilized?

-- 
-Shyam

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH 2/3] cifs: compute full_path already in cifs_readdir()
  2020-10-05  2:37 [PATCH 0/3 V1] cifs: cache the directory content for shroot Ronnie Sahlberg
@ 2020-10-05  2:37 ` Ronnie Sahlberg
  0 siblings, 0 replies; 15+ messages in thread
From: Ronnie Sahlberg @ 2020-10-05  2:37 UTC (permalink / raw)
  To: linux-cifs; +Cc: Steve French

Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
---
 fs/cifs/readdir.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 6df0922e7e30..31a18aae5e64 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -360,11 +360,11 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
  */
 
 static int
-initiate_cifs_search(const unsigned int xid, struct file *file)
+initiate_cifs_search(const unsigned int xid, struct file *file,
+		     char *full_path)
 {
 	__u16 search_flags;
 	int rc = 0;
-	char *full_path = NULL;
 	struct cifsFileInfo *cifsFile;
 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
 	struct tcon_link *tlink = NULL;
@@ -400,12 +400,6 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
 	cifsFile->invalidHandle = true;
 	cifsFile->srch_inf.endOfSearch = false;
 
-	full_path = build_path_from_dentry(file_dentry(file));
-	if (full_path == NULL) {
-		rc = -ENOMEM;
-		goto error_exit;
-	}
-
 	cifs_dbg(FYI, "Full path: %s start at: %lld\n", full_path, file->f_pos);
 
 ffirst_retry:
@@ -444,7 +438,6 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
 		goto ffirst_retry;
 	}
 error_exit:
-	kfree(full_path);
 	cifs_put_tlink(tlink);
 	return rc;
 }
@@ -688,7 +681,8 @@ static int cifs_save_resume_key(const char *current_entry,
  */
 static int
 find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
-		struct file *file, char **current_entry, int *num_to_ret)
+		struct file *file, char *full_path,
+		char **current_entry, int *num_to_ret)
 {
 	__u16 search_flags;
 	int rc = 0;
@@ -741,7 +735,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
 						ntwrk_buf_start);
 			cfile->srch_inf.ntwrk_buf_start = NULL;
 		}
-		rc = initiate_cifs_search(xid, file);
+		rc = initiate_cifs_search(xid, file, full_path);
 		if (rc) {
 			cifs_dbg(FYI, "error %d reinitiating a search on rewind\n",
 				 rc);
@@ -925,15 +919,22 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 	char *tmp_buf = NULL;
 	char *end_of_smb;
 	unsigned int max_len;
+	char *full_path = NULL;
 
 	xid = get_xid();
 
+	full_path = build_path_from_dentry(file_dentry(file));
+	if (full_path == NULL) {
+		rc = -ENOMEM;
+		goto rddir2_exit;
+	}
+
 	/*
 	 * Ensure FindFirst doesn't fail before doing filldir() for '.' and
 	 * '..'. Otherwise we won't be able to notify VFS in case of failure.
 	 */
 	if (file->private_data == NULL) {
-		rc = initiate_cifs_search(xid, file);
+		rc = initiate_cifs_search(xid, file, full_path);
 		cifs_dbg(FYI, "initiate cifs search rc %d\n", rc);
 		if (rc)
 			goto rddir2_exit;
@@ -960,8 +961,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 	} */
 
 	tcon = tlink_tcon(cifsFile->tlink);
-	rc = find_cifs_entry(xid, tcon, ctx->pos, file, &current_entry,
-			     &num_to_fill);
+	rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
+			     &current_entry, &num_to_fill);
 	if (rc) {
 		cifs_dbg(FYI, "fce error %d\n", rc);
 		goto rddir2_exit;
@@ -1019,6 +1020,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 	kfree(tmp_buf);
 
 rddir2_exit:
+	kfree(full_path);
 	free_xid(xid);
 	return rc;
 }
-- 
2.13.6


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH 2/3] cifs: compute full_path already in cifs_readdir()
  2020-10-04 23:37 [PATCH 0/3 V2]: " Ronnie Sahlberg
@ 2020-10-04 23:37 ` Ronnie Sahlberg
  0 siblings, 0 replies; 15+ messages in thread
From: Ronnie Sahlberg @ 2020-10-04 23:37 UTC (permalink / raw)
  To: linux-cifs; +Cc: Steve French

Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
---
 fs/cifs/readdir.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 6df0922e7e30..31a18aae5e64 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -360,11 +360,11 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
  */
 
 static int
-initiate_cifs_search(const unsigned int xid, struct file *file)
+initiate_cifs_search(const unsigned int xid, struct file *file,
+		     char *full_path)
 {
 	__u16 search_flags;
 	int rc = 0;
-	char *full_path = NULL;
 	struct cifsFileInfo *cifsFile;
 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
 	struct tcon_link *tlink = NULL;
@@ -400,12 +400,6 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
 	cifsFile->invalidHandle = true;
 	cifsFile->srch_inf.endOfSearch = false;
 
-	full_path = build_path_from_dentry(file_dentry(file));
-	if (full_path == NULL) {
-		rc = -ENOMEM;
-		goto error_exit;
-	}
-
 	cifs_dbg(FYI, "Full path: %s start at: %lld\n", full_path, file->f_pos);
 
 ffirst_retry:
@@ -444,7 +438,6 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
 		goto ffirst_retry;
 	}
 error_exit:
-	kfree(full_path);
 	cifs_put_tlink(tlink);
 	return rc;
 }
@@ -688,7 +681,8 @@ static int cifs_save_resume_key(const char *current_entry,
  */
 static int
 find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
-		struct file *file, char **current_entry, int *num_to_ret)
+		struct file *file, char *full_path,
+		char **current_entry, int *num_to_ret)
 {
 	__u16 search_flags;
 	int rc = 0;
@@ -741,7 +735,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
 						ntwrk_buf_start);
 			cfile->srch_inf.ntwrk_buf_start = NULL;
 		}
-		rc = initiate_cifs_search(xid, file);
+		rc = initiate_cifs_search(xid, file, full_path);
 		if (rc) {
 			cifs_dbg(FYI, "error %d reinitiating a search on rewind\n",
 				 rc);
@@ -925,15 +919,22 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 	char *tmp_buf = NULL;
 	char *end_of_smb;
 	unsigned int max_len;
+	char *full_path = NULL;
 
 	xid = get_xid();
 
+	full_path = build_path_from_dentry(file_dentry(file));
+	if (full_path == NULL) {
+		rc = -ENOMEM;
+		goto rddir2_exit;
+	}
+
 	/*
 	 * Ensure FindFirst doesn't fail before doing filldir() for '.' and
 	 * '..'. Otherwise we won't be able to notify VFS in case of failure.
 	 */
 	if (file->private_data == NULL) {
-		rc = initiate_cifs_search(xid, file);
+		rc = initiate_cifs_search(xid, file, full_path);
 		cifs_dbg(FYI, "initiate cifs search rc %d\n", rc);
 		if (rc)
 			goto rddir2_exit;
@@ -960,8 +961,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 	} */
 
 	tcon = tlink_tcon(cifsFile->tlink);
-	rc = find_cifs_entry(xid, tcon, ctx->pos, file, &current_entry,
-			     &num_to_fill);
+	rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path,
+			     &current_entry, &num_to_fill);
 	if (rc) {
 		cifs_dbg(FYI, "fce error %d\n", rc);
 		goto rddir2_exit;
@@ -1019,6 +1020,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
 	kfree(tmp_buf);
 
 rddir2_exit:
+	kfree(full_path);
 	free_xid(xid);
 	return rc;
 }
-- 
2.13.6


^ permalink raw reply related	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2020-10-20 10:29 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-10-01 20:50 [PATCH 0/3] cifs: cache directory content for shroot Ronnie Sahlberg
2020-10-01 20:50 ` [PATCH 1/3] cifs: return cached_fid from open_shroot Ronnie Sahlberg
2020-10-01 20:50 ` [PATCH 2/3] cifs: compute full_path already in cifs_readdir() Ronnie Sahlberg
2020-10-01 20:50 ` [PATCH 3/3] cifs: cache the directory content for shroot Ronnie Sahlberg
2020-10-01 22:24   ` Steve French
2020-10-02 15:29   ` Aurélien Aptel
2020-10-04 23:19     ` ronnie sahlberg
     [not found]       ` <CAH2r5mvijc=-JdmPMUxAUqmJKy0-x3o72NsHx+QcByBnggGXMA@mail.gmail.com>
2020-10-05  0:20         ` ronnie sahlberg
2020-10-20 10:28   ` Shyam Prasad N
2020-10-02  5:07 ` [PATCH 0/3] cifs: cache " Steve French
2020-10-02  8:04   ` ronnie sahlberg
2020-10-02  8:07     ` ronnie sahlberg
2020-10-02 14:13       ` Steve French
2020-10-04 23:37 [PATCH 0/3 V2]: " Ronnie Sahlberg
2020-10-04 23:37 ` [PATCH 2/3] cifs: compute full_path already in cifs_readdir() Ronnie Sahlberg
2020-10-05  2:37 [PATCH 0/3 V1] cifs: cache the directory content for shroot Ronnie Sahlberg
2020-10-05  2:37 ` [PATCH 2/3] cifs: compute full_path already in cifs_readdir() Ronnie Sahlberg

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.