All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 01/12] NFS: Don't flush caches for a getattr that races with writeback
@ 2016-06-14 19:05 Trond Myklebust
  2016-06-14 19:05 ` [PATCH 02/12] NFS: Cache access checks more aggressively Trond Myklebust
  0 siblings, 1 reply; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

If there were outstanding writes then chalk up the unexpected change
attribute on the server to them.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/inode.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 52e7d6869e3b..60051e62d3f1 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1729,12 +1729,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 		if (inode->i_version != fattr->change_attr) {
 			dprintk("NFS: change_attr change on server for file %s/%ld\n",
 					inode->i_sb->s_id, inode->i_ino);
-			invalid |= NFS_INO_INVALID_ATTR
-				| NFS_INO_INVALID_DATA
-				| NFS_INO_INVALID_ACCESS
-				| NFS_INO_INVALID_ACL;
-			if (S_ISDIR(inode->i_mode))
-				nfs_force_lookup_revalidate(inode);
+			/* Could it be a race with writeback? */
+			if (nfsi->nrequests == 0) {
+				invalid |= NFS_INO_INVALID_ATTR
+					| NFS_INO_INVALID_DATA
+					| NFS_INO_INVALID_ACCESS
+					| NFS_INO_INVALID_ACL;
+				if (S_ISDIR(inode->i_mode))
+					nfs_force_lookup_revalidate(inode);
+			}
 			inode->i_version = fattr->change_attr;
 		}
 	} else {
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH 02/12] NFS: Cache access checks more aggressively
  2016-06-14 19:05 [PATCH 01/12] NFS: Don't flush caches for a getattr that races with writeback Trond Myklebust
@ 2016-06-14 19:05 ` Trond Myklebust
  2016-06-14 19:05   ` [PATCH 03/12] NFS: Cache aggressively when file is open for writing Trond Myklebust
  0 siblings, 1 reply; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

If an attribute revalidation fails, then we already know that we'll
zap the access cache. If, OTOH, the inode isn't changing, there should
be no need to eject access cache entries just because they are old.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/dir.c | 42 ++++++++++++++++++++++++------------------
 1 file changed, 24 insertions(+), 18 deletions(-)

diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index aaf7bd0cbae2..d76f0b2b3ee2 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2232,17 +2232,30 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_access_entry *cache;
-	int err = -ENOENT;
+	bool retry = true;
+	int err;
 
 	spin_lock(&inode->i_lock);
-	if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
-		goto out_zap;
-	cache = nfs_access_search_rbtree(inode, cred);
-	if (cache == NULL)
-		goto out;
-	if (!nfs_have_delegated_attributes(inode) &&
-	    !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
-		goto out_stale;
+	for(;;) {
+		if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
+			goto out_zap;
+		cache = nfs_access_search_rbtree(inode, cred);
+		err = -ENOENT;
+		if (cache == NULL)
+			goto out;
+		/* Found an entry, is our attribute cache valid? */
+		if (!nfs_attribute_cache_expired(inode) &&
+		    !(nfsi->cache_validity & NFS_INO_INVALID_ATTR))
+			break;
+		if (!retry)
+			goto out_zap;
+		spin_unlock(&inode->i_lock);
+		err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+		if (err)
+			return err;
+		spin_lock(&inode->i_lock);
+		retry = false;
+	}
 	res->jiffies = cache->jiffies;
 	res->cred = cache->cred;
 	res->mask = cache->mask;
@@ -2251,12 +2264,6 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
 out:
 	spin_unlock(&inode->i_lock);
 	return err;
-out_stale:
-	rb_erase(&cache->rb_node, &nfsi->access_cache);
-	list_del(&cache->lru);
-	spin_unlock(&inode->i_lock);
-	nfs_access_free_entry(cache);
-	return -ENOENT;
 out_zap:
 	spin_unlock(&inode->i_lock);
 	nfs_access_zap_cache(inode);
@@ -2283,13 +2290,12 @@ static int nfs_access_get_cached_rcu(struct inode *inode, struct rpc_cred *cred,
 		cache = NULL;
 	if (cache == NULL)
 		goto out;
-	if (!nfs_have_delegated_attributes(inode) &&
-	    !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
+	err = nfs_revalidate_inode_rcu(NFS_SERVER(inode), inode);
+	if (err)
 		goto out;
 	res->jiffies = cache->jiffies;
 	res->cred = cache->cred;
 	res->mask = cache->mask;
-	err = 0;
 out:
 	rcu_read_unlock();
 	return err;
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH 03/12] NFS: Cache aggressively when file is open for writing
  2016-06-14 19:05 ` [PATCH 02/12] NFS: Cache access checks more aggressively Trond Myklebust
@ 2016-06-14 19:05   ` Trond Myklebust
  2016-06-14 19:05     ` [PATCH 04/12] NFS: Kill NFS_INO_NFS_INO_FLUSHING: it is a performance killer Trond Myklebust
  2016-06-17  1:11     ` [PATCH 03/12] NFS: Cache aggressively when file is open for writing Oleg Drokin
  0 siblings, 2 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

Unless the user is using file locking, we must assume close-to-open
cache consistency when the file is open for writing. Adjust the
caching algorithm so that it does not clear the cache on out-of-order
writes and/or attribute revalidations.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/file.c  | 13 ++-----------
 fs/nfs/inode.c | 56 +++++++++++++++++++++++++++++++++++++++-----------------
 2 files changed, 41 insertions(+), 28 deletions(-)

diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 717a8d6af52d..2d39d9f9da7d 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -780,11 +780,6 @@ do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
 }
 
 static int
-is_time_granular(struct timespec *ts) {
-	return ((ts->tv_sec == 0) && (ts->tv_nsec <= 1000));
-}
-
-static int
 do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
 {
 	struct inode *inode = filp->f_mapping->host;
@@ -817,12 +812,8 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
 	 * This makes locking act as a cache coherency point.
 	 */
 	nfs_sync_mapping(filp->f_mapping);
-	if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
-		if (is_time_granular(&NFS_SERVER(inode)->time_delta))
-			__nfs_revalidate_inode(NFS_SERVER(inode), inode);
-		else
-			nfs_zap_caches(inode);
-	}
+	if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+		nfs_zap_mapping(inode, filp->f_mapping);
 out:
 	return status;
 }
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 60051e62d3f1..8a808d25dbc8 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -878,7 +878,10 @@ void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
 	struct nfs_inode *nfsi = NFS_I(inode);
 
 	spin_lock(&inode->i_lock);
-	list_add(&ctx->list, &nfsi->open_files);
+	if (ctx->mode & FMODE_WRITE)
+		list_add(&ctx->list, &nfsi->open_files);
+	else
+		list_add_tail(&ctx->list, &nfsi->open_files);
 	spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL_GPL(nfs_inode_attach_open_context);
@@ -1215,6 +1218,21 @@ int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *
 	return __nfs_revalidate_mapping(inode, mapping, true);
 }
 
+static bool nfs_file_has_writers(struct nfs_inode *nfsi)
+{
+	assert_spin_locked(&nfsi->vfs_inode.i_lock);
+
+	if (list_empty(&nfsi->open_files))
+		return false;
+	/* Note: This relies on nfsi->open_files being ordered with writers
+	 *       being placed at the head of the list.
+	 *       See nfs_inode_attach_open_context()
+	 */
+	return (list_first_entry(&nfsi->open_files,
+			struct nfs_open_context,
+			list)->mode & FMODE_WRITE) == FMODE_WRITE;
+}
+
 static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
@@ -1279,22 +1297,24 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
 	if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
 		return -EIO;
 
-	if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
-			inode->i_version != fattr->change_attr)
-		invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+	if (!nfs_file_has_writers(nfsi)) {
+		/* Verify a few of the more important attributes */
+		if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && inode->i_version != fattr->change_attr)
+			invalid |= NFS_INO_INVALID_ATTR | NFS_INO_REVAL_PAGECACHE;
 
-	/* Verify a few of the more important attributes */
-	if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
-		invalid |= NFS_INO_INVALID_ATTR;
+		if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
+			invalid |= NFS_INO_INVALID_ATTR;
 
-	if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
-		cur_size = i_size_read(inode);
-		new_isize = nfs_size_to_loff_t(fattr->size);
-		if (cur_size != new_isize)
-			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+		if ((fattr->valid & NFS_ATTR_FATTR_CTIME) && !timespec_equal(&inode->i_ctime, &fattr->ctime))
+			invalid |= NFS_INO_INVALID_ATTR;
+
+		if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
+			cur_size = i_size_read(inode);
+			new_isize = nfs_size_to_loff_t(fattr->size);
+			if (cur_size != new_isize)
+				invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+		}
 	}
-	if (nfsi->nrequests != 0)
-		invalid &= ~NFS_INO_REVAL_PAGECACHE;
 
 	/* Have any file permissions changed? */
 	if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
@@ -1675,6 +1695,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 	unsigned long invalid = 0;
 	unsigned long now = jiffies;
 	unsigned long save_cache_validity;
+	bool have_writers = nfs_file_has_writers(nfsi);
 	bool cache_revalidated = true;
 
 	dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
@@ -1730,7 +1751,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 			dprintk("NFS: change_attr change on server for file %s/%ld\n",
 					inode->i_sb->s_id, inode->i_ino);
 			/* Could it be a race with writeback? */
-			if (nfsi->nrequests == 0) {
+			if (!have_writers) {
 				invalid |= NFS_INO_INVALID_ATTR
 					| NFS_INO_INVALID_DATA
 					| NFS_INO_INVALID_ACCESS
@@ -1770,9 +1791,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 		if (new_isize != cur_isize) {
 			/* Do we perhaps have any outstanding writes, or has
 			 * the file grown beyond our last write? */
-			if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
+			if (nfsi->nrequests == 0 || new_isize > cur_isize) {
 				i_size_write(inode, new_isize);
-				invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+				if (!have_writers)
+					invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
 			}
 			dprintk("NFS: isize change on server for file %s/%ld "
 					"(%Ld to %Ld)\n",
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH 04/12] NFS: Kill NFS_INO_NFS_INO_FLUSHING: it is a performance killer
  2016-06-14 19:05   ` [PATCH 03/12] NFS: Cache aggressively when file is open for writing Trond Myklebust
@ 2016-06-14 19:05     ` Trond Myklebust
  2016-06-14 19:05       ` [PATCH 05/12] NFS: writepage of a single page should not be synchronous Trond Myklebust
  2016-06-17  1:11     ` [PATCH 03/12] NFS: Cache aggressively when file is open for writing Oleg Drokin
  1 sibling, 1 reply; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

filemap_fdatawrite() and friends already deal just fine with livelock.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/file.c          |  8 --------
 fs/nfs/nfstrace.h      |  1 -
 fs/nfs/write.c         | 11 -----------
 include/linux/nfs_fs.h |  1 -
 4 files changed, 21 deletions(-)

diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 2d39d9f9da7d..29d7477a62e8 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -360,14 +360,6 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
 
 start:
 	/*
-	 * Prevent starvation issues if someone is doing a consistency
-	 * sync-to-disk
-	 */
-	ret = wait_on_bit_action(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING,
-				 nfs_wait_bit_killable, TASK_KILLABLE);
-	if (ret)
-		return ret;
-	/*
 	 * Wait for O_DIRECT to complete
 	 */
 	inode_dio_wait(mapping->host);
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 0b9e5cc9a747..fe80a1c26340 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -37,7 +37,6 @@
 			{ 1 << NFS_INO_ADVISE_RDPLUS, "ADVISE_RDPLUS" }, \
 			{ 1 << NFS_INO_STALE, "STALE" }, \
 			{ 1 << NFS_INO_INVALIDATING, "INVALIDATING" }, \
-			{ 1 << NFS_INO_FLUSHING, "FLUSHING" }, \
 			{ 1 << NFS_INO_FSCACHE, "FSCACHE" }, \
 			{ 1 << NFS_INO_LAYOUTCOMMIT, "NEED_LAYOUTCOMMIT" }, \
 			{ 1 << NFS_INO_LAYOUTCOMMITTING, "LAYOUTCOMMIT" })
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e1c74d3db64d..980d44f3a84c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -657,16 +657,9 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
 	struct inode *inode = mapping->host;
-	unsigned long *bitlock = &NFS_I(inode)->flags;
 	struct nfs_pageio_descriptor pgio;
 	int err;
 
-	/* Stop dirtying of new pages while we sync */
-	err = wait_on_bit_lock_action(bitlock, NFS_INO_FLUSHING,
-			nfs_wait_bit_killable, TASK_KILLABLE);
-	if (err)
-		goto out_err;
-
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
 
 	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
@@ -674,10 +667,6 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
 	nfs_pageio_complete(&pgio);
 
-	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
-	smp_mb__after_atomic();
-	wake_up_bit(bitlock, NFS_INO_FLUSHING);
-
 	if (err < 0)
 		goto out_err;
 	err = pgio.pg_error;
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index d71278c3c5bd..120dd04b553c 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -205,7 +205,6 @@ struct nfs_inode {
 #define NFS_INO_STALE		(1)		/* possible stale inode */
 #define NFS_INO_ACL_LRU_SET	(2)		/* Inode is on the LRU list */
 #define NFS_INO_INVALIDATING	(3)		/* inode is being invalidated */
-#define NFS_INO_FLUSHING	(4)		/* inode is flushing out data */
 #define NFS_INO_FSCACHE		(5)		/* inode can be cached by FS-Cache */
 #define NFS_INO_FSCACHE_LOCK	(6)		/* FS-Cache cookie management lock */
 #define NFS_INO_LAYOUTCOMMIT	(9)		/* layoutcommit required */
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH 05/12] NFS: writepage of a single page should not be synchronous
  2016-06-14 19:05     ` [PATCH 04/12] NFS: Kill NFS_INO_NFS_INO_FLUSHING: it is a performance killer Trond Myklebust
@ 2016-06-14 19:05       ` Trond Myklebust
  2016-06-14 19:05         ` [PATCH 06/12] NFS: Don't hold the inode lock across fsync() Trond Myklebust
  0 siblings, 1 reply; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

It is almost always better to wait for more so that we can issue a
bulk commit.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/write.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 980d44f3a84c..b13d48881d3a 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -625,7 +625,7 @@ static int nfs_writepage_locked(struct page *page,
 	int err;
 
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
-	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
+	nfs_pageio_init_write(&pgio, inode, 0,
 				false, &nfs_async_write_completion_ops);
 	err = nfs_do_writepage(page, wbc, &pgio, launder);
 	nfs_pageio_complete(&pgio);
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH 06/12] NFS: Don't hold the inode lock across fsync()
  2016-06-14 19:05       ` [PATCH 05/12] NFS: writepage of a single page should not be synchronous Trond Myklebust
@ 2016-06-14 19:05         ` Trond Myklebust
  2016-06-14 19:05           ` [PATCH 07/12] NFS: Don't enable deep stack recursion when doing memory reclaim Trond Myklebust
  2016-06-15  7:08           ` [PATCH 06/12] NFS: Don't hold the inode lock across fsync() Christoph Hellwig
  0 siblings, 2 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

Commits are no longer required to be serialised.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/file.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 29d7477a62e8..249262b6bcbe 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -277,11 +277,9 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 		ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
 		if (ret != 0)
 			break;
-		inode_lock(inode);
 		ret = nfs_file_fsync_commit(file, start, end, datasync);
 		if (!ret)
 			ret = pnfs_sync_inode(inode, !!datasync);
-		inode_unlock(inode);
 		/*
 		 * If nfs_file_fsync_commit detected a server reboot, then
 		 * resend all dirty pages that might have been covered by
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH 07/12] NFS: Don't enable deep stack recursion when doing memory reclaim
  2016-06-14 19:05         ` [PATCH 06/12] NFS: Don't hold the inode lock across fsync() Trond Myklebust
@ 2016-06-14 19:05           ` Trond Myklebust
  2016-06-14 19:05             ` [PATCH 08/12] NFS: Fix O_DIRECT verifier problems Trond Myklebust
  2016-06-15  7:09             ` [PATCH 07/12] NFS: Don't enable deep stack recursion when doing memory reclaim Christoph Hellwig
  2016-06-15  7:08           ` [PATCH 06/12] NFS: Don't hold the inode lock across fsync() Christoph Hellwig
  1 sibling, 2 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/file.c | 23 -----------------------
 1 file changed, 23 deletions(-)

diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 249262b6bcbe..df4dd8e7e62e 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -460,31 +460,8 @@ static void nfs_invalidate_page(struct page *page, unsigned int offset,
  */
 static int nfs_release_page(struct page *page, gfp_t gfp)
 {
-	struct address_space *mapping = page->mapping;
-
 	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
 
-	/* Always try to initiate a 'commit' if relevant, but only
-	 * wait for it if the caller allows blocking.  Even then,
-	 * only wait 1 second and only if the 'bdi' is not congested.
-	 * Waiting indefinitely can cause deadlocks when the NFS
-	 * server is on this machine, when a new TCP connection is
-	 * needed and in other rare cases.  There is no particular
-	 * need to wait extensively here.  A short wait has the
-	 * benefit that someone else can worry about the freezer.
-	 */
-	if (mapping) {
-		struct nfs_server *nfss = NFS_SERVER(mapping->host);
-		nfs_commit_inode(mapping->host, 0);
-		if (gfpflags_allow_blocking(gfp) &&
-		    !bdi_write_congested(&nfss->backing_dev_info)) {
-			wait_on_page_bit_killable_timeout(page, PG_private,
-							  HZ);
-			if (PagePrivate(page))
-				set_bdi_congested(&nfss->backing_dev_info,
-						  BLK_RW_ASYNC);
-		}
-	}
 	/* If PagePrivate() is set, then the page is not freeable */
 	if (PagePrivate(page))
 		return 0;
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH 08/12] NFS: Fix O_DIRECT verifier problems
  2016-06-14 19:05           ` [PATCH 07/12] NFS: Don't enable deep stack recursion when doing memory reclaim Trond Myklebust
@ 2016-06-14 19:05             ` Trond Myklebust
  2016-06-14 19:05               ` [PATCH 09/12] NFS: Ensure we reset the write verifier 'committed' value on resend Trond Myklebust
  2016-06-15  7:09             ` [PATCH 07/12] NFS: Don't enable deep stack recursion when doing memory reclaim Christoph Hellwig
  1 sibling, 1 reply; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

We should not be interested in looking at the value of the stable field,
since that could take any value.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/direct.c   | 10 ++++++++--
 fs/nfs/internal.h |  7 +++++++
 fs/nfs/write.c    |  2 +-
 3 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 979b3c4dee6a..d6d43b5eafb3 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -196,6 +196,12 @@ static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
 	WARN_ON_ONCE(verfp->committed < 0);
 }
 
+static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1,
+		const struct nfs_writeverf *v2)
+{
+	return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier);
+}
+
 /*
  * nfs_direct_cmp_hdr_verf - compare verifier for pgio header
  * @dreq - direct request possibly spanning multiple servers
@@ -215,7 +221,7 @@ static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
 		nfs_direct_set_hdr_verf(dreq, hdr);
 		return 0;
 	}
-	return memcmp(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
+	return nfs_direct_cmp_verf(verfp, &hdr->verf);
 }
 
 /*
@@ -238,7 +244,7 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
 	if (verfp->committed < 0)
 		return 1;
 
-	return memcmp(verfp, &data->verf, sizeof(struct nfs_writeverf));
+	return nfs_direct_cmp_verf(verfp, &data->verf);
 }
 
 /**
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 5154fa65a2f2..150a8eb0f323 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -506,6 +506,13 @@ extern int nfs_migrate_page(struct address_space *,
 #define nfs_migrate_page NULL
 #endif
 
+static inline int
+nfs_write_verifier_cmp(const struct nfs_write_verifier *v1,
+		const struct nfs_write_verifier *v2)
+{
+	return memcmp(v1->data, v2->data, sizeof(v1->data));
+}
+
 /* unlink.c */
 extern struct rpc_task *
 nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b13d48881d3a..3087fb6f1983 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1789,7 +1789,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
 
 		/* Okay, COMMIT succeeded, apparently. Check the verifier
 		 * returned by the server against all stored verfs. */
-		if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) {
+		if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
 			/* We have a match */
 			nfs_inode_remove_request(req);
 			dprintk(" OK\n");
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH 09/12] NFS: Ensure we reset the write verifier 'committed' value on resend.
  2016-06-14 19:05             ` [PATCH 08/12] NFS: Fix O_DIRECT verifier problems Trond Myklebust
@ 2016-06-14 19:05               ` Trond Myklebust
  2016-06-14 19:05                 ` [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes Trond Myklebust
  0 siblings, 1 reply; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/direct.c   |  2 ++
 fs/nfs/internal.h | 17 +++++++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index d6d43b5eafb3..fb659bb50678 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -661,6 +661,8 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 
 	dreq->count = 0;
+	dreq->verf.committed = NFS_INVALID_STABLE_HOW;
+	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
 	for (i = 0; i < dreq->mirror_count; i++)
 		dreq->mirrors[i].count = 0;
 	get_dreq(dreq);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 150a8eb0f323..0eb5c924886d 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -499,6 +499,23 @@ int nfs_key_timeout_notify(struct file *filp, struct inode *inode);
 bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx);
 void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio);
 
+#ifdef CONFIG_NFS_V4_1
+static inline
+void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
+{
+	int i;
+
+	for (i = 0; i < cinfo->nbuckets; i++)
+		cinfo->buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
+}
+#else
+static inline
+void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
+{
+}
+#endif
+
+
 #ifdef CONFIG_MIGRATION
 extern int nfs_migrate_page(struct address_space *,
 		struct page *, struct page *, enum migrate_mode);
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
  2016-06-14 19:05               ` [PATCH 09/12] NFS: Ensure we reset the write verifier 'committed' value on resend Trond Myklebust
@ 2016-06-14 19:05                 ` Trond Myklebust
  2016-06-14 19:05                   ` [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count Trond Myklebust
  2016-06-15  7:13                   ` [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes Christoph Hellwig
  0 siblings, 2 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

Allow dio requests to be scheduled in parallel, while ensuring that they
do not conflict with buffered I/O.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/Makefile        |  2 +-
 fs/nfs/direct.c        | 14 +++++++------
 fs/nfs/file.c          | 13 ++++++++++--
 fs/nfs/inode.c         |  1 +
 fs/nfs/internal.h      |  6 ++++++
 fs/nfs/io.c            | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/nfs_fs.h |  3 +++
 7 files changed, 84 insertions(+), 9 deletions(-)
 create mode 100644 fs/nfs/io.c

diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 8664417955a2..6abdda209642 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_NFS_FS) += nfs.o
 
 CFLAGS_nfstrace.o += -I$(src)
 nfs-y 			:= client.o dir.o file.o getroot.o inode.o super.o \
-			   direct.o pagelist.o read.o symlink.o unlink.o \
+			   io.o direct.o pagelist.o read.o symlink.o unlink.o \
 			   write.o namespace.o mount_clnt.o nfstrace.o
 nfs-$(CONFIG_ROOT_NFS)	+= nfsroot.o
 nfs-$(CONFIG_SYSCTL)	+= sysctl.o
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index fb659bb50678..81b19c0fd3a3 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -574,6 +574,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
+	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_direct_req *dreq;
 	struct nfs_lock_context *l_ctx;
 	ssize_t result = -EINVAL;
@@ -587,7 +588,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
 	if (!count)
 		goto out;
 
-	inode_lock(inode);
+	nfs_lock_dio(nfsi);
 	result = nfs_sync_mapping(mapping);
 	if (result)
 		goto out_unlock;
@@ -615,7 +616,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
 	NFS_I(inode)->read_io += count;
 	result = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
 
-	inode_unlock(inode);
+	nfs_unlock_dio(nfsi);
 
 	if (!result) {
 		result = nfs_direct_wait(dreq);
@@ -629,7 +630,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
 out_release:
 	nfs_direct_req_release(dreq);
 out_unlock:
-	inode_unlock(inode);
+	nfs_unlock_dio(nfsi);
 out:
 	return result;
 }
@@ -1000,6 +1001,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
+	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_direct_req *dreq;
 	struct nfs_lock_context *l_ctx;
 	loff_t pos, end;
@@ -1013,7 +1015,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 	pos = iocb->ki_pos;
 	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
 
-	inode_lock(inode);
+	nfs_lock_dio(nfsi);
 
 	result = nfs_sync_mapping(mapping);
 	if (result)
@@ -1053,7 +1055,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 					      pos >> PAGE_SHIFT, end);
 	}
 
-	inode_unlock(inode);
+	nfs_unlock_dio(nfsi);
 
 	if (!result) {
 		result = nfs_direct_wait(dreq);
@@ -1076,7 +1078,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 out_release:
 	nfs_direct_req_release(dreq);
 out_unlock:
-	inode_unlock(inode);
+	nfs_unlock_dio(nfsi);
 	return result;
 }
 
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index df4dd8e7e62e..7c90b6c03103 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -161,6 +161,7 @@ ssize_t
 nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
+	struct nfs_inode *nfsi = NFS_I(inode);
 	ssize_t result;
 
 	if (iocb->ki_flags & IOCB_DIRECT)
@@ -170,12 +171,14 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
 		iocb->ki_filp,
 		iov_iter_count(to), (unsigned long) iocb->ki_pos);
 
+	nfs_lock_bio(nfsi);
 	result = nfs_revalidate_mapping_protected(inode, iocb->ki_filp->f_mapping);
 	if (!result) {
 		result = generic_file_read_iter(iocb, to);
 		if (result > 0)
 			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
 	}
+	nfs_unlock_bio(nfsi);
 	return result;
 }
 EXPORT_SYMBOL_GPL(nfs_file_read);
@@ -186,17 +189,20 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
 		     unsigned int flags)
 {
 	struct inode *inode = file_inode(filp);
+	struct nfs_inode *nfsi = NFS_I(inode);
 	ssize_t res;
 
 	dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n",
 		filp, (unsigned long) count, (unsigned long long) *ppos);
 
+	nfs_lock_bio(nfsi);
 	res = nfs_revalidate_mapping_protected(inode, filp->f_mapping);
 	if (!res) {
 		res = generic_file_splice_read(filp, ppos, pipe, count, flags);
 		if (res > 0)
 			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, res);
 	}
+	nfs_unlock_bio(nfsi);
 	return res;
 }
 EXPORT_SYMBOL_GPL(nfs_file_splice_read);
@@ -621,6 +627,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
+	struct nfs_inode *nfsi = NFS_I(inode);
 	unsigned long written = 0;
 	ssize_t result;
 	size_t count = iov_iter_count(from);
@@ -639,9 +646,10 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
 	dprintk("NFS: write(%pD2, %zu@%Ld)\n",
 		file, count, (long long) iocb->ki_pos);
 
-	result = -EBUSY;
 	if (IS_SWAPFILE(inode))
 		goto out_swapfile;
+
+	nfs_lock_bio(nfsi);
 	/*
 	 * O_APPEND implies that we must revalidate the file length.
 	 */
@@ -668,11 +676,12 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
 	if (result > 0)
 		nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
 out:
+	nfs_unlock_bio(nfsi);
 	return result;
 
 out_swapfile:
 	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
-	goto out;
+	return -EBUSY;
 }
 EXPORT_SYMBOL_GPL(nfs_file_write);
 
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 8a808d25dbc8..8326fce028fe 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1984,6 +1984,7 @@ static void init_once(void *foo)
 	nfsi->commit_info.ncommit = 0;
 	atomic_set(&nfsi->commit_info.rpcs_out, 0);
 	init_rwsem(&nfsi->rmdir_sem);
+	init_rwsem(&nfsi->io_lock);
 	nfs4_init_once(nfsi);
 }
 
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 0eb5c924886d..6b89fdf2c7fa 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -411,6 +411,12 @@ extern void __exit unregister_nfs_fs(void);
 extern bool nfs_sb_active(struct super_block *sb);
 extern void nfs_sb_deactive(struct super_block *sb);
 
+/* io.c */
+extern void nfs_lock_bio(struct nfs_inode *nfsi);
+extern void nfs_unlock_bio(struct nfs_inode *nfsi);
+extern void nfs_lock_dio(struct nfs_inode *nfsi);
+extern void nfs_unlock_dio(struct nfs_inode *nfsi);
+
 /* namespace.c */
 #define NFS_PATH_CANONICAL 1
 extern char *nfs_path(char **p, struct dentry *dentry,
diff --git a/fs/nfs/io.c b/fs/nfs/io.c
new file mode 100644
index 000000000000..c027d7e52d45
--- /dev/null
+++ b/fs/nfs/io.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 Trond Myklebust
+ *
+ * I/O and data path helper functionality.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/rwsem.h>
+#include <linux/fs.h>
+#include <linux/nfs_fs.h>
+
+#include "internal.h"
+
+void
+nfs_lock_bio(struct nfs_inode *nfsi)
+{
+	/* Be an optimist! */
+	down_read(&nfsi->io_lock);
+	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0)
+		return;
+	up_read(&nfsi->io_lock);
+	/* Slow path.... */
+	down_write(&nfsi->io_lock);
+	clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
+	downgrade_write(&nfsi->io_lock);
+}
+
+void
+nfs_unlock_bio(struct nfs_inode *nfsi)
+{
+	up_read(&nfsi->io_lock);
+}
+
+void
+nfs_lock_dio(struct nfs_inode *nfsi)
+{
+	/* Be an optimist! */
+	down_read(&nfsi->io_lock);
+	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) != 0)
+		return;
+	up_read(&nfsi->io_lock);
+	/* Slow path.... */
+	down_write(&nfsi->io_lock);
+	set_bit(NFS_INO_ODIRECT, &nfsi->flags);
+	downgrade_write(&nfsi->io_lock);
+}
+
+void
+nfs_unlock_dio(struct nfs_inode *nfsi)
+{
+	up_read(&nfsi->io_lock);
+}
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 120dd04b553c..9ce6169be9ab 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -122,6 +122,8 @@ struct nfs_inode {
 	unsigned long		flags;			/* atomic bit ops */
 	unsigned long		cache_validity;		/* bit mask */
 
+	struct rw_semaphore	io_lock;
+
 	/*
 	 * read_cache_jiffies is when we started read-caching this inode.
 	 * attrtimeo is for how long the cached information is assumed
@@ -210,6 +212,7 @@ struct nfs_inode {
 #define NFS_INO_LAYOUTCOMMIT	(9)		/* layoutcommit required */
 #define NFS_INO_LAYOUTCOMMITTING (10)		/* layoutcommit inflight */
 #define NFS_INO_LAYOUTSTATS	(11)		/* layoutstats inflight */
+#define NFS_INO_ODIRECT		(12)		/* I/O setting is O_DIRECT */
 
 static inline struct nfs_inode *NFS_I(const struct inode *inode)
 {
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count
  2016-06-14 19:05                 ` [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes Trond Myklebust
@ 2016-06-14 19:05                   ` Trond Myklebust
  2016-06-14 19:05                     ` [PATCH 12/12] NFS: Clean up nfs_direct_complete() Trond Myklebust
  2016-06-15  7:16                     ` [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count Christoph Hellwig
  2016-06-15  7:13                   ` [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes Christoph Hellwig
  1 sibling, 2 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/direct.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 81b19c0fd3a3..e1376538b473 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -385,11 +385,6 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
 		spin_unlock(&inode->i_lock);
 	}
 
-	if (write)
-		nfs_zap_mapping(inode, inode->i_mapping);
-
-	inode_dio_end(inode);
-
 	if (dreq->iocb) {
 		long res = (long) dreq->error;
 		if (!res)
@@ -488,7 +483,6 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 			     &nfs_direct_read_completion_ops);
 	get_dreq(dreq);
 	desc.pg_dreq = dreq;
-	inode_dio_begin(inode);
 
 	while (iov_iter_count(iter)) {
 		struct page **pagevec;
@@ -540,7 +534,6 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 	 * generic layer handle the completion.
 	 */
 	if (requested_bytes == 0) {
-		inode_dio_end(inode);
 		nfs_direct_req_release(dreq);
 		return result < 0 ? result : -EIO;
 	}
@@ -771,6 +764,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 static void nfs_direct_write_schedule_work(struct work_struct *work)
 {
 	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
+	struct inode *inode = dreq->inode;
 	int flags = dreq->flags;
 
 	dreq->flags = 0;
@@ -782,6 +776,9 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
 			nfs_direct_write_reschedule(dreq);
 			break;
 		default:
+			nfs_zap_mapping(inode, inode->i_mapping);
+			inode_dio_end(inode);
+
 			nfs_direct_complete(dreq, true);
 	}
 }
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* [PATCH 12/12] NFS: Clean up nfs_direct_complete()
  2016-06-14 19:05                   ` [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count Trond Myklebust
@ 2016-06-14 19:05                     ` Trond Myklebust
  2016-06-15  7:16                     ` [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count Christoph Hellwig
  1 sibling, 0 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-14 19:05 UTC (permalink / raw)
  To: linux-nfs

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
---
 fs/nfs/direct.c | 25 ++++++++++---------------
 1 file changed, 10 insertions(+), 15 deletions(-)

diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e1376538b473..6a55c43db187 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -372,19 +372,8 @@ out:
  * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
  * the iocb is still valid here if this is a synchronous request.
  */
-static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
+static void nfs_direct_complete(struct nfs_direct_req *dreq)
 {
-	struct inode *inode = dreq->inode;
-
-	if (dreq->iocb && write) {
-		loff_t pos = dreq->iocb->ki_pos + dreq->count;
-
-		spin_lock(&inode->i_lock);
-		if (i_size_read(inode) < pos)
-			i_size_write(inode, pos);
-		spin_unlock(&inode->i_lock);
-	}
-
 	if (dreq->iocb) {
 		long res = (long) dreq->error;
 		if (!res)
@@ -435,7 +424,7 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 	}
 out_put:
 	if (put_dreq(dreq))
-		nfs_direct_complete(dreq, false);
+		nfs_direct_complete(dreq);
 	hdr->release(hdr);
 }
 
@@ -539,7 +528,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 	}
 
 	if (put_dreq(dreq))
-		nfs_direct_complete(dreq, false);
+		nfs_direct_complete(dreq);
 	return 0;
 }
 
@@ -764,6 +753,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 static void nfs_direct_write_schedule_work(struct work_struct *work)
 {
 	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
+	loff_t pos = dreq->iocb->ki_pos + dreq->count;
 	struct inode *inode = dreq->inode;
 	int flags = dreq->flags;
 
@@ -776,10 +766,15 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
 			nfs_direct_write_reschedule(dreq);
 			break;
 		default:
+			spin_lock(&inode->i_lock);
+			if (i_size_read(inode) < pos)
+				i_size_write(inode, pos);
+			spin_unlock(&inode->i_lock);
+
 			nfs_zap_mapping(inode, inode->i_mapping);
 			inode_dio_end(inode);
 
-			nfs_direct_complete(dreq, true);
+			nfs_direct_complete(dreq);
 	}
 }
 
-- 
2.5.5


^ permalink raw reply related	[flat|nested] 39+ messages in thread

* Re: [PATCH 06/12] NFS: Don't hold the inode lock across fsync()
  2016-06-14 19:05         ` [PATCH 06/12] NFS: Don't hold the inode lock across fsync() Trond Myklebust
  2016-06-14 19:05           ` [PATCH 07/12] NFS: Don't enable deep stack recursion when doing memory reclaim Trond Myklebust
@ 2016-06-15  7:08           ` Christoph Hellwig
  2016-06-15 14:47             ` Trond Myklebust
  1 sibling, 1 reply; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15  7:08 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: linux-nfs

On Tue, Jun 14, 2016 at 03:05:09PM -0400, Trond Myklebust wrote:
> Commits are no longer required to be serialised.

But we need something to lock out new callers of inode_dio_end
when calling inode_dio_wait.  Then again the current code already
fails to do that..

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 07/12] NFS: Don't enable deep stack recursion when doing memory reclaim
  2016-06-14 19:05           ` [PATCH 07/12] NFS: Don't enable deep stack recursion when doing memory reclaim Trond Myklebust
  2016-06-14 19:05             ` [PATCH 08/12] NFS: Fix O_DIRECT verifier problems Trond Myklebust
@ 2016-06-15  7:09             ` Christoph Hellwig
  1 sibling, 0 replies; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15  7:09 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: linux-nfs

Aka don't do a COMMIT in ->releasepage?  This could use a few more
comments explaining why this change is done.  At least for XFS we
have stopped initiating writeback-like behavior in ->releasepage
long ago.

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
  2016-06-14 19:05                 ` [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes Trond Myklebust
  2016-06-14 19:05                   ` [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count Trond Myklebust
@ 2016-06-15  7:13                   ` Christoph Hellwig
  2016-06-15 14:29                     ` Trond Myklebust
  1 sibling, 1 reply; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15  7:13 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: linux-nfs

> +void
> +nfs_lock_bio(struct nfs_inode *nfsi)

bio stands for buffered I/O?  This could really use a more descriptive
name and/or a comment..

> +{
> +	/* Be an optimist! */
> +	down_read(&nfsi->io_lock);
> +	if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0)
> +		return;
> +	up_read(&nfsi->io_lock);

So if no direct I/O is going on this locks shared?

> +	/* Slow path.... */
> +	down_write(&nfsi->io_lock);
> +	clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
> +	downgrade_write(&nfsi->io_lock);

The whole locking here seems rather confusing.  Why not use the XFS
locking model:

buffered write:		exclusive
buffered read:		shared
direct write:		shared (exclusive for pagecache invalidate)
direct read:		shared (exclusive for pagecache invalidate)

The nice thing is that in 4.7-rc i_mutex has been replaced with a
rw_mutex so you can just use that in shared mode for direct I/O
as-is without needing any new lock.

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count
  2016-06-14 19:05                   ` [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count Trond Myklebust
  2016-06-14 19:05                     ` [PATCH 12/12] NFS: Clean up nfs_direct_complete() Trond Myklebust
@ 2016-06-15  7:16                     ` Christoph Hellwig
  2016-06-15 14:36                       ` Trond Myklebust
  1 sibling, 1 reply; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15  7:16 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: linux-nfs

Explanation of why reads are more special than writes here or in
general why they are safe?

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
  2016-06-15  7:13                   ` [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes Christoph Hellwig
@ 2016-06-15 14:29                     ` Trond Myklebust
  2016-06-15 14:48                         ` Christoph Hellwig
  0 siblings, 1 reply; 39+ messages in thread
From: Trond Myklebust @ 2016-06-15 14:29 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nfs

DQoNCk9uIDYvMTUvMTYsIDAzOjEzLCAiQ2hyaXN0b3BoIEhlbGx3aWciIDxoY2hAaW5mcmFkZWFk
Lm9yZz4gd3JvdGU6DQoNCj4+ICt2b2lkDQo+PiArbmZzX2xvY2tfYmlvKHN0cnVjdCBuZnNfaW5v
ZGUgKm5mc2kpDQo+DQo+YmlvIHN0YW5kcyBmb3IgYnVmZmVyZWQgSS9PPyAgVGhpcyBjb3VsZCBy
ZWFsbHkgdXNlIGEgbW9yZSBkZXNjcmlwdGl2ZQ0KPm5hbWUgYW5kL29yIGEgY29tbWVudC4uDQo+
DQo+PiArew0KPj4gKwkvKiBCZSBhbiBvcHRpbWlzdCEgKi8NCj4+ICsJZG93bl9yZWFkKCZuZnNp
LT5pb19sb2NrKTsNCj4+ICsJaWYgKHRlc3RfYml0KE5GU19JTk9fT0RJUkVDVCwgJm5mc2ktPmZs
YWdzKSA9PSAwKQ0KPj4gKwkJcmV0dXJuOw0KPj4gKwl1cF9yZWFkKCZuZnNpLT5pb19sb2NrKTsN
Cj4NCj5TbyBpZiBubyBkaXJlY3QgSS9PIGlzIGdvaW5nIG9uIHRoaXMgbG9ja3Mgc2hhcmVkPw0K
Pg0KPj4gKwkvKiBTbG93IHBhdGguLi4uICovDQo+PiArCWRvd25fd3JpdGUoJm5mc2ktPmlvX2xv
Y2spOw0KPj4gKwljbGVhcl9iaXQoTkZTX0lOT19PRElSRUNULCAmbmZzaS0+ZmxhZ3MpOw0KPj4g
Kwlkb3duZ3JhZGVfd3JpdGUoJm5mc2ktPmlvX2xvY2spOw0KPg0KPlRoZSB3aG9sZSBsb2NraW5n
IGhlcmUgc2VlbXMgcmF0aGVyIGNvbmZ1c2luZy4gIFdoeSBub3QgdXNlIHRoZSBYRlMNCj5sb2Nr
aW5nIG1vZGVsOg0KDQpUaGUgbG9ja2luZyBpcyBhY3R1YWxseSBzaW1wbGVyIHRoYW4gWEZTLiBX
ZSBoYXZlIDIgSS9PIG1vZGVzOiBidWZmZXJlZCBJL08gYW5kIGRpcmVjdCBJL08uIFRoZSB3cml0
ZSBsb2NrIGlzIHRoZXJlIHRvIGVuc3VyZSBzYWZlIHRyYW5zaXRpb25zIGJldHdlZW4gdGhvc2Ug
MiBtb2RlcywgYnV0IG9uY2UgdGhlIG1vZGUgaXMgc2V0LCB3ZSBfb25seV8gdXNlIHNoYXJlZCBs
b2NrcyBpbiBvcmRlciB0byBhbGxvdyBwYXJhbGxlbGlzbS4NCg0KPg0KPmJ1ZmZlcmVkIHdyaXRl
OgkJZXhjbHVzaXZlDQo+YnVmZmVyZWQgcmVhZDoJCXNoYXJlZA0KPmRpcmVjdCB3cml0ZToJCXNo
YXJlZCAoZXhjbHVzaXZlIGZvciBwYWdlY2FjaGUgaW52YWxpZGF0ZSkNCj5kaXJlY3QgcmVhZDoJ
CXNoYXJlZCAoZXhjbHVzaXZlIGZvciBwYWdlY2FjaGUgaW52YWxpZGF0ZSkNCj4NCj5UaGUgbmlj
ZSB0aGluZyBpcyB0aGFuIGluIDQuNy1yYyBpX211dGV4IGhhcyBiZWVuIHJlcGxhY2VkIHdpdGgg
YQ0KPnJ3X211dGV4IHNvIHlvdSBjYW4ganVzdCB1c2UgdGhhdCBpbiBzaGFyZWQgbW9kZSBmb3Ig
ZGlyZWN0IEkvTw0KPmFzLWlzIHdpdGhvdXQgbmVlZGluZyBhbnkgbmV3IGxvY2suDQoNCldlIHdv
dWxkIGVuZCB1cCBzZXJpYWxpc2luZyByZWFkcyBhbmQgd3JpdGVzLCBzaW5jZSB0aGUgbGF0dGVy
IGdyYWIgYW4gZXhjbHVzaXZlIGxvY2sgaW4gZ2VuZXJpY19maWxlX3dyaXRlKCkuIFdoeSBkbyB0
aGF0IGlmIHdlIGRvbuKAmXQgaGF2ZSB0bz8NCgkNCkNoZWVycw0KICBUcm9uZA0KDQo=


^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count
  2016-06-15  7:16                     ` [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count Christoph Hellwig
@ 2016-06-15 14:36                       ` Trond Myklebust
  2016-06-15 14:41                         ` Christoph Hellwig
  0 siblings, 1 reply; 39+ messages in thread
From: Trond Myklebust @ 2016-06-15 14:36 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nfs

T24gNi8xNS8xNiwgMDM6MTYsICJDaHJpc3RvcGggSGVsbHdpZyIgPGhjaEBpbmZyYWRlYWQub3Jn
PiB3cm90ZToNCg0KPkV4cGxhbmF0aW9uIG9mIHdoeSByZWFkcyBhcmUgbW9yZSBzcGVjaWFsIHRo
YW4gd3JpdGVzIGhlcmUgb3IgaW4NCj5nZW5lcmFsIHdoeSB0aGV5IGFyZSBzYWZlPw0KPg0KDQpX
aXRoIHRoZSBuZXcgbG9ja2luZywgd2UgYWxyZWFkeSBoYXZlIGV4Y2x1c2lvbiBiZXR3ZWVuIGJ1
ZmZlcmVkIEkvTyBhbmQgZGlyZWN0IEkvTywgYW5kIHNvIHRoZSBvbmx5IHJlbWFpbmluZyB1c2Ug
Y2FzZSBmb3IgaW5vZGVfZGlvX3dhaXQoKSBpcyB0byB3YWl0IGZvciB3cml0ZXMgdG8gY29tcGxl
dGUgaW4gb3BlcmF0aW9ucyBsaWtlIGZzeW5jKCkuDQoNCkNoZWVycw0KICBUcm9uZA0KDQo=


^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count
  2016-06-15 14:36                       ` Trond Myklebust
@ 2016-06-15 14:41                         ` Christoph Hellwig
  2016-06-15 14:50                           ` Trond Myklebust
  0 siblings, 1 reply; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15 14:41 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: Christoph Hellwig, linux-nfs

On Wed, Jun 15, 2016 at 02:36:04PM +0000, Trond Myklebust wrote:
> On 6/15/16, 03:16, "Christoph Hellwig" <hch@infradead.org> wrote:
> 
> >Explanation of why reads are more special than writes here or in
> >general why they are safe?
> >
> 
> With the new locking, we already have exclusion between buffered I/O and direct I/O, and so the only remaining use case for inode_dio_wait() is to wait for writes to complete in operations like fsync().

There is no need to wait for pending dio in fsync - fsync is only
guaranteed to flush out I/O that's already been completed.

inode_dio_wait and friends were introduced to protect aio that doesn't
hold i_mutex against truncate.

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 06/12] NFS: Don't hold the inode lock across fsync()
  2016-06-15  7:08           ` [PATCH 06/12] NFS: Don't hold the inode lock across fsync() Christoph Hellwig
@ 2016-06-15 14:47             ` Trond Myklebust
  2016-06-15 14:54               ` Christoph Hellwig
  0 siblings, 1 reply; 39+ messages in thread
From: Trond Myklebust @ 2016-06-15 14:47 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nfs

DQoNCk9uIDYvMTUvMTYsIDAzOjA4LCAibGludXgtbmZzLW93bmVyQHZnZXIua2VybmVsLm9yZyBv
biBiZWhhbGYgb2YgQ2hyaXN0b3BoIEhlbGx3aWciIDxsaW51eC1uZnMtb3duZXJAdmdlci5rZXJu
ZWwub3JnIG9uIGJlaGFsZiBvZiBoY2hAaW5mcmFkZWFkLm9yZz4gd3JvdGU6DQoNCj5PbiBUdWUs
IEp1biAxNCwgMjAxNiBhdCAwMzowNTowOVBNIC0wNDAwLCBUcm9uZCBNeWtsZWJ1c3Qgd3JvdGU6
DQo+PiBDb21taXRzIGFyZSBubyBsb25nZXIgcmVxdWlyZWQgdG8gYmUgc2VyaWFsaXNlZC4NCj4N
Cj5CdXQgd2UgbmVlZCBzb21ldGhpbmcgdG8gbG9jayBvdXQgbmV3IGNhbGxlcnMgb2YgaW5vZGVf
ZGlvX2VuZA0KPndoZW4gY2FsbGluZyBpbm9kZV9kaW9fd2FpdC4gIFRoZW4gYWdhaW4gdGhlIGN1
cnJlbnQgY29kZSBhbHJlYWR5DQo+ZmFpbHMgdG8gdG8gZG8gdGhhdC4uDQoNCldlIGNvdWxkIGRv
IHRoYXQgYnkgc2V0dGluZyB0aGUgSS9PIG1vZGUgdG8gYnVmZmVyZWQ7IHRoYXQgc3RpbGwgYWxs
b3dzIHBhcmFsbGVsaXNtICBmb3IgdGhlIGJ1ZmZlcmVkIEkvTyBjYXNlLiBJIHdvdWxkbuKAmXQg
bm9ybWFsbHkgZXhwZWN0IGFwcGxpY2F0aW9ucyB0aGF0IHVzZSBPX0RJUkVDVCBvbiBORlMgdG8g
Y2FsbCBmc3luYygpLCBzaW5jZSB3ZSBhbHdheXMgZW5mb3JjZSBPX0RJUkVDVHxPX1NZTkMgc2Vt
YW50aWNzLg0KDQo=


^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
  2016-06-15 14:29                     ` Trond Myklebust
@ 2016-06-15 14:48                         ` Christoph Hellwig
  0 siblings, 0 replies; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15 14:48 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: Christoph Hellwig, linux-nfs, xfs

On Wed, Jun 15, 2016 at 02:29:42PM +0000, Trond Myklebust wrote:
> The locking is actually simpler than XFS.

It looks way more complicated.  And totally undocumented.

> We have 2 I/O modes: buffered I/O and direct I/O. The write lock is there to ensure safe transitions between those 2 modes, but once the mode is set,
> we _only_ use shared locks in order to allow parallelism.

From reading the patch that's not what actually happens - I think you're
still taking i_rwsem exclusive for buffered writes, aren't you?

Doing that is absolutely mandatory for Posix atomicity requirements, or
you'll break tons of applications.

But XFS allows full parallelism for direct reads and writes as long
as there is no more pagecache to flush.  But if you have pages in
the pagecache you need the exclusive lock to prevent against new
pagecache pages being added.

> >The nice thing is that in 4.7-rc i_mutex has been replaced with a
> >rw_mutex so you can just use that in shared mode for direct I/O
> >as-is without needing any new lock.
> 
> We would end up serialising reads and writes, since the latter grab an
> exclusive lock in generic_file_write(). Why do that if we don't have to?

Looks at the XFS code - no serialization between direct reads and writes
as long as no buffered I/O came in inbetween.

And don't use generic_file_{read,write}_iter if you want to do direct I/O,
unfortunately locking in mm/filemap.c is totally screwed for direct I/O,
take a look at XFS which is where direct I/O came from and where we get
the locking right.

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
@ 2016-06-15 14:48                         ` Christoph Hellwig
  0 siblings, 0 replies; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15 14:48 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: Christoph Hellwig, linux-nfs, xfs

On Wed, Jun 15, 2016 at 02:29:42PM +0000, Trond Myklebust wrote:
> The locking is actually simpler than XFS.

It looks way more complicated.  And totally undocumented.

> We have 2 I/O modes: buffered I/O and direct I/O. The write lock is there to ensure safe transitions between those 2 modes, but once the mode is set,
> we _only_ use shared locks in order to allow parallelism.

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count
  2016-06-15 14:41                         ` Christoph Hellwig
@ 2016-06-15 14:50                           ` Trond Myklebust
  2016-06-15 14:53                             ` Christoph Hellwig
  0 siblings, 1 reply; 39+ messages in thread
From: Trond Myklebust @ 2016-06-15 14:50 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nfs

DQoNCk9uIDYvMTUvMTYsIDEwOjQxLCAiQ2hyaXN0b3BoIEhlbGx3aWciIDxoY2hAaW5mcmFkZWFk
Lm9yZz4gd3JvdGU6DQoNCj5PbiBXZWQsIEp1biAxNSwgMjAxNiBhdCAwMjozNjowNFBNICswMDAw
LCBUcm9uZCBNeWtsZWJ1c3Qgd3JvdGU6DQo+PiBPbiA2LzE1LzE2LCAwMzoxNiwgIkNocmlzdG9w
aCBIZWxsd2lnIiA8aGNoQGluZnJhZGVhZC5vcmc+IHdyb3RlOg0KPj4gDQo+PiA+RXhwbGFuYXRp
b24gb2Ygd2h5IHJlYWRzIGFyZSBtb3JlIHNwZWNpYWwgdGhhbiB3cml0ZXMgaGVyZSBvciBpbg0K
Pj4gPmdlbmVyYWwgd2h5IHRoZXkgYXJlIHNhZmU/DQo+PiA+DQo+PiANCj4+IFdpdGggdGhlIG5l
dyBsb2NraW5nLCB3ZSBhbHJlYWR5IGhhdmUgZXhjbHVzaW9uIGJldHdlZW4gYnVmZmVyZWQgSS9P
IGFuZCBkaXJlY3QgSS9PLCBhbmQgc28gdGhlIG9ubHkgcmVtYWluaW5nIHVzZSBjYXNlIGZvciBp
bm9kZV9kaW9fd2FpdCgpIGlzIHRvIHdhaXQgZm9yIHdyaXRlcyB0byBjb21wbGV0ZSBpbiBvcGVy
YXRpb25zIGxpa2UgZnN5bmMoKS4NCj4NCj5UaGVyZSBpcyBubyBuZWVkIHRvIHdhaXQgZm9yIHBl
bmRpbmcgZGlvIGluIGZzeW5jIC0gZnN5bmMgaXMgb25seQ0KPmd1YXJhbnRlZSB0byBmbHVzaCBv
dXQgSS9PIHRoYXQncyBhbHJlYXlkIGJlZW4gY29tcGxldGVkLg0KPg0KPmlub2RlX2Rpb193YWl0
IGFuZCBmcmllbmRzIHdlcmUgaW50cm9kdWNlcyB0byBwcm90ZWN0IGFpbyB0aGF0IGRvZXNuJ3QN
Cj5ob2xkIGlfbXV0ZXggYWdhaW5zdCB0cnVuY2F0ZS4NCg0KRmFpciBlbm91Z2guIFNvIGl0IHNv
dW5kcyBhcyBpZiB5b3UgYXJlIHN1Z2dlc3Rpbmcgd2UgY2FuIGp1c3QgZHJvcCB1c2luZyBpbm9k
ZV9kaW9fKigpIGFsdG9nZXRoZXI/DQoNCg0K


^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
  2016-06-15 14:48                         ` Christoph Hellwig
@ 2016-06-15 14:52                           ` Trond Myklebust
  -1 siblings, 0 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-15 14:52 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nfs, xfs


[-- Attachment #1.1: Type: text/plain, Size: 2240 bytes --]

On 6/15/16, 10:48, "Christoph Hellwig" <hch@infradead.org> wrote:

>On Wed, Jun 15, 2016 at 02:29:42PM +0000, Trond Myklebust wrote:
>> The locking is actually simpler than XFS.
>
>It looks way more complicated.  And totally undocumented.
>
>> We have 2 I/O modes: buffered I/O and direct I/O. The write lock is there to ensure safe transitions between those 2 modes, but once the mode is set,
>> we _only_ use shared locks in order to allow parallelism.
>
>From reading the patch that's not what actually happens - I think you're
>still taking i_rwsem exclusive for buffered writes, aren't you?
>
>Doing that is absolutely mandatory for Posix atomicy requirements, or
>you'll break tons of of applications.

Yes. We continue to let the VFS manage serialisation of writes.

>But XFS allows full parallelism for direct reads and writes as long
>as there is no more pagecache to flush.  But if you have pages in
>the pagecache you need the exclusive lock to prevent against new
>pagecache pages being added.

Exactly. So does this.

>> >The nice thing is that in 4.7-rc i_mutex has been replaced with a
>> >rw_mutex so you can just use that in shared mode for direct I/O
>> >as-is without needing any new lock.
>> 
>> We would end up serialising reads and writes, since the latter grab an
>> exclusive lock in generic_file_write(). Why do that if we don't have to?
>
>Looks at the XFS code - no serialization between direct reads and writes
>as long as no buffered I/O came in inbetween.
>
>And don't use generic_file_{read,write}_iter if you want to do direct I/O,
>unfortunately locking in mm/filemap.c is totally screwed for direct I/O,
>take a look at XFS which is where direct I/O came from and where we get
>the locking right.

We don’t use generic_file_* for O_DIRECT; we only use it for buffered I/O.

Disclaimer

The information contained in this communication from the sender is confidential. It is intended solely for use by the recipient and others authorized to receive it. If you are not the recipient, you are hereby notified that any disclosure, copying, distribution or taking action in relation of the contents of this information is strictly prohibited and may be unlawful.

[-- Attachment #1.2: Type: text/html, Size: 2729 bytes --]

[-- Attachment #2: Type: text/plain, Size: 121 bytes --]

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
@ 2016-06-15 14:52                           ` Trond Myklebust
  0 siblings, 0 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-15 14:52 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nfs, xfs

T24gNi8xNS8xNiwgMTA6NDgsICJDaHJpc3RvcGggSGVsbHdpZyIgPGhjaEBpbmZyYWRlYWQub3Jn
PiB3cm90ZToNCg0KPk9uIFdlZCwgSnVuIDE1LCAyMDE2IGF0IDAyOjI5OjQyUE0gKzAwMDAsIFRy
b25kIE15a2xlYnVzdCB3cm90ZToNCj4+IFRoZSBsb2NraW5nIGlzIGFjdHVhbGx5IHNpbXBsZXIg
dGhhbiBYRlMuDQo+DQo+SXQgbG9va3Mgd2F5IG1vcmUgY29tcGxpY2F0ZWQuICBBbmQgdG90YWxs
eSB1bmRvY3VtZW50ZWQuDQo+DQo+PiBXZSBoYXZlIDIgSS9PIG1vZGVzOiBidWZmZXJlZCBJL08g
YW5kIGRpcmVjdCBJL08uIFRoZSB3cml0ZSBsb2NrIGlzIHRoZXJlIHRvIGVuc3VyZSBzYWZlIHRy
YW5zaXRpb25zIGJldHdlZW4gdGhvc2UgMiBtb2RlcywgYnV0IG9uY2UgdGhlIG1vZGUgaXMgc2V0
LA0KPj4gd2UgX29ubHlfIHVzZSBzaGFyZWQgbG9ja3MgaW4gb3JkZXIgdG8gYWxsb3cgcGFyYWxs
ZWxpc20uDQo+DQo+RnJvbSByZWFkaW5nIHRoZSBwYXRjaCB0aGF0J3Mgbm90IHdoYXQgYWN0dWFs
bHkgaGFwcGVucyAtIEkgdGhpbmsgeW91J3JlDQo+c3RpbGwgdGFraW5nIGlfcndzZW0gZXhjbHVz
aXZlIGZvciBidWZmZXJlZCB3cml0ZXMsIGFyZW4ndCB5b3U/DQo+DQo+RG9pbmcgdGhhdCBpcyBh
YnNvbHV0ZWx5IG1hbmRhdG9yeSBmb3IgUG9zaXggYXRvbWljeSByZXF1aXJlbWVudHMsIG9yDQo+
eW91J2xsIGJyZWFrIHRvbnMgb2Ygb2YgYXBwbGljYXRpb25zLg0KDQpZZXMuIFdlIGNvbnRpbnVl
IHRvIGxldCB0aGUgVkZTIG1hbmFnZSBzZXJpYWxpc2F0aW9uIG9mIHdyaXRlcy4NCg0KPkJ1dCBY
RlMgYWxsb3dzIGZ1bGwgcGFyYWxsZWxpc20gZm9yIGRpcmVjdCByZWFkcyBhbmQgd3JpdGVzIGFz
IGxvbmcNCj5hcyB0aGVyZSBpcyBubyBtb3JlIHBhZ2VjYWNoZSB0byBmbHVzaC4gIEJ1dCBpZiB5
b3UgaGF2ZSBwYWdlcyBpbg0KPnRoZSBwYWdlY2FjaGUgeW91IG5lZWQgdGhlIGV4Y2x1c2l2ZSBs
b2NrIHRvIHByZXZlbnQgYWdhaW5zdCBuZXcNCj5wYWdlY2FjaGUgcGFnZXMgYmVpbmcgYWRkZWQu
DQoNCkV4YWN0bHkuIFNvIGRvZXMgdGhpcy4NCg0KPj4gPlRoZSBuaWNlIHRoaW5nIGlzIHRoYW4g
aW4gNC43LXJjIGlfbXV0ZXggaGFzIGJlZW4gcmVwbGFjZWQgd2l0aCBhDQo+PiA+cndfbXV0ZXgg
c28geW91IGNhbiBqdXN0IHVzZSB0aGF0IGluIHNoYXJlZCBtb2RlIGZvciBkaXJlY3QgSS9PDQo+
PiA+YXMtaXMgd2l0aG91dCBuZWVkaW5nIGFueSBuZXcgbG9jay4NCj4+IA0KPj4gV2Ugd291bGQg
ZW5kIHVwIHNlcmlhbGlzaW5nIHJlYWRzIGFuZCB3cml0ZXMsIHNpbmNlIHRoZSBsYXR0ZXIgZ3Jh
YiBhbg0KPj4gZXhjbHVzaXZlIGxvY2sgaW4gZ2VuZXJpY19maWxlX3dyaXRlKCkuIFdoeSBkbyB0
aGF0IGlmIHdlIGRvbj8/P3QgaGF2ZSB0bz8NCj4NCj5Mb29rcyBhdCB0aGUgWEZTIGNvZGUgLSBu
byBzZXJpYWxpemF0aW9uIGJldHdlZW4gZGlyZWN0IHJlYWRzIGFuZCB3cml0ZXMNCj5hcyBsb25n
IGFzIG5vIGJ1ZmZlcmVkIEkvTyBjYW1lIGluIGluYmV0d2Vlbi4NCj4NCj5BbmQgZG9uJ3QgdXNl
IGdlbmVyaWNfZmlsZV97cmVhZCx3cml0ZX1faXRlciBpZiB5b3Ugd2FudCB0byBkbyBkaXJlY3Qg
SS9PLA0KPnVuZm9ydHVuYXRlbHkgbG9ja2luZyBpbiBtbS9maWxlbWFwLmMgaXMgdG90YWxseSBz
Y3Jld2VkIGZvciBkaXJlY3QgSS9PLA0KPnRha2UgYSBsb29rIGF0IFhGUyB3aGljaCBpcyB3aGVy
ZSBkaXJlY3QgSS9PIGNhbWUgZnJvbSBhbmQgd2hlcmUgd2UgZ2V0DQo+dGhlIGxvY2tpbmcgcmln
aHQuDQoNCldlIGRvbuKAmXQgdXNlIGdlbmVyaWNfZmlsZV8qIGZvciBPX0RJUkVDVDsgd2Ugb25s
eSB1c2UgaXQgZm9yIGJ1ZmZlcmVkIEkvTy4NCg0KDQo=


^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count
  2016-06-15 14:50                           ` Trond Myklebust
@ 2016-06-15 14:53                             ` Christoph Hellwig
  0 siblings, 0 replies; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15 14:53 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: Christoph Hellwig, linux-nfs

On Wed, Jun 15, 2016 at 02:50:07PM +0000, Trond Myklebust wrote:
> Fair enough. So it sounds as if you are suggesting we can just drop using inode_dio_*() altogether?

Maybe.  The big question is how a direct write vs truncate race is
handled.  Either way I think a patch like this deserves a detailed
analysis documented in the changelog and/or comments in the code.

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 06/12] NFS: Don't hold the inode lock across fsync()
  2016-06-15 14:47             ` Trond Myklebust
@ 2016-06-15 14:54               ` Christoph Hellwig
  0 siblings, 0 replies; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15 14:54 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: Christoph Hellwig, linux-nfs

On Wed, Jun 15, 2016 at 02:47:55PM +0000, Trond Myklebust wrote:
> >But we need something to lock out new callers of inode_dio_end
> >when calling inode_dio_wait.  Then again the current code already
> >fails to do that..
> 
> We could do that by setting the I/O mode to buffered; that still allows
> parallelism  for the buffered I/O case. I wouldn't normally expect
> applications that use O_DIRECT on NFS to call fsync(), since we always enforce O_DIRECT|O_SYNC semantics.

Maybe not applications specificly written for NFS.  But if you're using
for example qemu on O_DIRECT without O_SYNC and lots of fdatasync calls
will be the default behavior.

Anyway, as per the other thread we might be able to get rid of the
inode_dio_wait call entirely.

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
  2016-06-15 14:52                           ` Trond Myklebust
@ 2016-06-15 14:56                             ` Christoph Hellwig
  -1 siblings, 0 replies; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15 14:56 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: Christoph Hellwig, linux-nfs, xfs

On Wed, Jun 15, 2016 at 02:52:24PM +0000, Trond Myklebust wrote:
> >But XFS allows full parallelism for direct reads and writes as long
> >as there is no more pagecache to flush.  But if you have pages in
> >the pagecache you need the exclusive lock to prevent against new
> >pagecache pages being added.
> 
> Exactly. So does this.

So let's avoid bloating the inode with another rw_semaphore, and make
everyone's life easier by using the existing lock.

> We don't use generic_file_* for O_DIRECT; we only use it for buffered I/O.

I know - this was just an answer to your reference to the generic_file_*
code.

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
@ 2016-06-15 14:56                             ` Christoph Hellwig
  0 siblings, 0 replies; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15 14:56 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: Christoph Hellwig, linux-nfs, xfs

On Wed, Jun 15, 2016 at 02:52:24PM +0000, Trond Myklebust wrote:
> >But XFS allows full parallelism for direct reads and writes as long
> >as there is no more pagecache to flush.  But if you have pages in
> >the pagecache you need the exclusive lock to prevent against new
> >pagecache pages being added.
> 
> Exactly. So does this.

So let's avoid bloating the inode with another rw_semaphore, and make
everyones life easier by using the existing lock.

> We don’t use generic_file_* for O_DIRECT; we only use it for buffered I/O.

I know - this was just an answer to your reference to the generic_file_*
code.

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
  2016-06-15 14:56                             ` Christoph Hellwig
@ 2016-06-15 15:09                               ` Trond Myklebust
  -1 siblings, 0 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-15 15:09 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nfs, xfs


[-- Attachment #1.1: Type: text/plain, Size: 1383 bytes --]



On 6/15/16, 10:56, "Christoph Hellwig" <hch@infradead.org> wrote:

>On Wed, Jun 15, 2016 at 02:52:24PM +0000, Trond Myklebust wrote:
>> >But XFS allows full parallelism for direct reads and writes as long
>> >as there is no more pagecache to flush.  But if you have pages in
>> >the pagecache you need the exclusive lock to prevent against new
>> >pagecache pages being added.
>> 
>> Exactly. So does this.
>
>So let's avoid bloating the inode with another rw_semaphore, and make
>everyones life easier by using the existing lock.

As I said earlier, the problem with that is you end up artificially serialising buffered reads and buffered writes.

• The reads only need a shared lock in order to protect the I/O mode from flipping to O_DIRECT (and relying on page locks to protect against buffered writes).
• The writes need protection against O_DIRECT, they don’t need serialisation with other reads, but they do need to be serialised against other buffered writes.

Disclaimer

The information contained in this communication from the sender is confidential. It is intended solely for use by the recipient and others authorized to receive it. If you are not the recipient, you are hereby notified that any disclosure, copying, distribution or taking action in relation of the contents of this information is strictly prohibited and may be unlawful.

[-- Attachment #1.2: Type: text/html, Size: 1715 bytes --]

[-- Attachment #2: Type: text/plain, Size: 121 bytes --]

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
@ 2016-06-15 15:09                               ` Trond Myklebust
  0 siblings, 0 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-15 15:09 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nfs, xfs

DQoNCk9uIDYvMTUvMTYsIDEwOjU2LCAiQ2hyaXN0b3BoIEhlbGx3aWciIDxoY2hAaW5mcmFkZWFk
Lm9yZz4gd3JvdGU6DQoNCj5PbiBXZWQsIEp1biAxNSwgMjAxNiBhdCAwMjo1MjoyNFBNICswMDAw
LCBUcm9uZCBNeWtsZWJ1c3Qgd3JvdGU6DQo+PiA+QnV0IFhGUyBhbGxvd3MgZnVsbCBwYXJhbGxl
bGlzbSBmb3IgZGlyZWN0IHJlYWRzIGFuZCB3cml0ZXMgYXMgbG9uZw0KPj4gPmFzIHRoZXJlIGlz
IG5vIG1vcmUgcGFnZWNhY2hlIHRvIGZsdXNoLiAgQnV0IGlmIHlvdSBoYXZlIHBhZ2VzIGluDQo+
PiA+dGhlIHBhZ2VjYWNoZSB5b3UgbmVlZCB0aGUgZXhjbHVzaXZlIGxvY2sgdG8gcHJldmVudCBh
Z2FpbnN0IG5ldw0KPj4gPnBhZ2VjYWNoZSBwYWdlcyBiZWluZyBhZGRlZC4NCj4+IA0KPj4gRXhh
Y3RseS4gU28gZG9lcyB0aGlzLg0KPg0KPlNvIGxldCdzIGF2b2lkIGJsb2F0aW5nIHRoZSBpbm9k
ZSB3aXRoIGFub3RoZXIgcndfc2VtYXBob3JlLCBhbmQgbWFrZQ0KPmV2ZXJ5b25lcyBsaWZlIGVh
c2llciBieSB1c2luZyB0aGUgZXhpc3RpbmcgbG9jay4NCg0KQXMgSSBzYWlkIGVhcmxpZXIsIHRo
ZSBwcm9ibGVtIHdpdGggdGhhdCBpcyB5b3UgZW5kIHVwIGFydGlmaWNpYWxseSBzZXJpYWxpc2lu
ZyBidWZmZXJlZCByZWFkcyBhbmQgYnVmZmVyZWQgd3JpdGVzLg0KDQrigKIgVGhlIHJlYWRzIG9u
bHkgbmVlZCBhIHNoYXJlZCBsb2NrIGluIG9yZGVyIHRvIHByb3RlY3QgdGhlIEkvTyBtb2RlIGZy
b20gZmxpcHBpbmcgdG8gT19ESVJFQ1QgKGFuZCByZWx5aW5nIG9uIHBhZ2UgbG9ja3MgdG8gcHJv
dGVjdCBhZ2FpbnN0IGJ1ZmZlcmVkIHdyaXRlcykuDQrigKIgVGhlIHdyaXRlcyBuZWVkIHByb3Rl
Y3Rpb24gYWdhaW5zdCBPX0RJUkVDVCwgdGhleSBkb27igJl0IG5lZWQgc2VyaWFsaXNhdGlvbiB3
aXRoIG90aGVyIHJlYWRzLCBidXQgdGhleSBkbyBuZWVkIHRvIGJlIHNlcmlhbGlzZWQgYWdhaW5z
dCBvdGhlciBidWZmZXJlZCB3cml0ZXMuDQoNCg0KDQo=


^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
  2016-06-15 15:09                               ` Trond Myklebust
@ 2016-06-15 15:14                                 ` Christoph Hellwig
  -1 siblings, 0 replies; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15 15:14 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: Christoph Hellwig, linux-nfs, xfs

On Wed, Jun 15, 2016 at 03:09:23PM +0000, Trond Myklebust wrote:
> As I said earlier, the problem with that is you end up artificially serialising buffered reads and buffered writes.

If you actually want to be Posix compliant you need to serialize buffered
reads against buffered writes - it's just that most Linux file systems
happen to get this wrong.

> • The reads only need a shared lock in order to protect the I/O mode from flipping to O_DIRECT (and relying on page locks to protect against buffered writes).

Which strictly speaking is not enough, although as said above most
Linux filesystems get this wrong.  If you indeed want to keep that
(incorrect) behavior you need another lock.  It's definitively not
"simpler", though.

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
@ 2016-06-15 15:14                                 ` Christoph Hellwig
  0 siblings, 0 replies; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-15 15:14 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: Christoph Hellwig, linux-nfs, xfs

On Wed, Jun 15, 2016 at 03:09:23PM +0000, Trond Myklebust wrote:
> As I said earlier, the problem with that is you end up artificially serialising buffered reads and buffered writes.

If you actually want to be Posix compliant you need to serialize buffered
reads against buffered writes - it's just that most Linux file systems
happen to get this wrong.

> • The reads only need a shared lock in order to protect the I/O mode from flipping to O_DIRECT (and relying on page locks to protect against buffered writes).

Which strictly speaking is not enough, although as said above most
Linux filesystems get this wrong.  If you indeed want to keep that
(incorrect) behavior you need another lock.  It's definitively not
"simpler", though.

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
  2016-06-15 15:14                                 ` Christoph Hellwig
@ 2016-06-15 15:45                                   ` Trond Myklebust
  -1 siblings, 0 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-15 15:45 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nfs, xfs


[-- Attachment #1.1: Type: text/plain, Size: 1902 bytes --]



On 6/15/16, 11:14, "Christoph Hellwig" <hch@infradead.org> wrote:

>On Wed, Jun 15, 2016 at 03:09:23PM +0000, Trond Myklebust wrote:
>> As I said earlier, the problem with that is you end up artificially serialising buffered reads and buffered writes.
>
>If you actually want to be Posix compiant you need to serialize buffered
>reads against buffererd writes - it's just that most Linux file systems
>happen to get this wrong.
>
>> ??? The reads only need a shared lock in order to protect the I/O mode from flipping to O_DIRECT (and relying on page locks to protect against buffered writes).
>
>Which strictly speaking is not enough, although as said above most
>Linux filesystems get this wrong.  If you indeed want to keep that
>(incorrect) behavior you need another lock.  It's defintively not
>"simpler", though.
>

Serialisation is not mandatory in POSIX:

http://pubs.opengroup.org/onlinepubs/9699919799/functions/write.html

“Writes can be serialized with respect to other reads and writes. If a read() of file data can be proven (by any means) to occur after a write() of the data, it must reflect that write(), even if the calls are made by different processes. A similar requirement applies to multiple write operations to the same file position. This is needed to guarantee the propagation of data from write() calls to subsequent read() calls. This requirement is particularly significant for networked file systems, where some caching schemes violate these semantics.”

Disclaimer

The information contained in this communication from the sender is confidential. It is intended solely for use by the recipient and others authorized to receive it. If you are not the recipient, you are hereby notified that any disclosure, copying, distribution or taking action in relation of the contents of this information is strictly prohibited and may be unlawful.

[-- Attachment #1.2: Type: text/html, Size: 2351 bytes --]

[-- Attachment #2: Type: text/plain, Size: 121 bytes --]

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
@ 2016-06-15 15:45                                   ` Trond Myklebust
  0 siblings, 0 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-15 15:45 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-nfs, xfs

DQoNCk9uIDYvMTUvMTYsIDExOjE0LCAiQ2hyaXN0b3BoIEhlbGx3aWciIDxoY2hAaW5mcmFkZWFk
Lm9yZz4gd3JvdGU6DQoNCj5PbiBXZWQsIEp1biAxNSwgMjAxNiBhdCAwMzowOToyM1BNICswMDAw
LCBUcm9uZCBNeWtsZWJ1c3Qgd3JvdGU6DQo+PiBBcyBJIHNhaWQgZWFybGllciwgdGhlIHByb2Js
ZW0gd2l0aCB0aGF0IGlzIHlvdSBlbmQgdXAgYXJ0aWZpY2lhbGx5IHNlcmlhbGlzaW5nIGJ1ZmZl
cmVkIHJlYWRzIGFuZCBidWZmZXJlZCB3cml0ZXMuDQo+DQo+SWYgeW91IGFjdHVhbGx5IHdhbnQg
dG8gYmUgUG9zaXggY29tcGlhbnQgeW91IG5lZWQgdG8gc2VyaWFsaXplIGJ1ZmZlcmVkDQo+cmVh
ZHMgYWdhaW5zdCBidWZmZXJlcmQgd3JpdGVzIC0gaXQncyBqdXN0IHRoYXQgbW9zdCBMaW51eCBm
aWxlIHN5c3RlbXMNCj5oYXBwZW4gdG8gZ2V0IHRoaXMgd3JvbmcuDQo+DQo+PiA/Pz8gVGhlIHJl
YWRzIG9ubHkgbmVlZCBhIHNoYXJlZCBsb2NrIGluIG9yZGVyIHRvIHByb3RlY3QgdGhlIEkvTyBt
b2RlIGZyb20gZmxpcHBpbmcgdG8gT19ESVJFQ1QgKGFuZCByZWx5aW5nIG9uIHBhZ2UgbG9ja3Mg
dG8gcHJvdGVjdCBhZ2FpbnN0IGJ1ZmZlcmVkIHdyaXRlcykuDQo+DQo+V2hpY2ggc3RyaWN0bHkg
c3BlYWtpbmcgaXMgbm90IGVub3VnaCwgYWx0aG91Z2ggYXMgc2FpZCBhYm92ZSBtb3N0DQo+TGlu
dXggZmlsZXN5c3RlbXMgZ2V0IHRoaXMgd3JvbmcuICBJZiB5b3UgaW5kZWVkIHdhbnQgdG8ga2Vl
cCB0aGF0DQo+KGluY29ycmVjdCkgYmVoYXZpb3IgeW91IG5lZWQgYW5vdGhlciBsb2NrLiAgSXQn
cyBkZWZpbnRpdmVseSBub3QNCj4ic2ltcGxlciIsIHRob3VnaC4NCj4NCg0KU2VyaWFsaXNhdGlv
biBpcyBub3QgbWFuZGF0b3J5IGluIFBPU0lYOg0KDQpodHRwOi8vcHVicy5vcGVuZ3JvdXAub3Jn
L29ubGluZXB1YnMvOTY5OTkxOTc5OS9mdW5jdGlvbnMvd3JpdGUuaHRtbA0KDQrigJxXcml0ZXMg
Y2FuIGJlIHNlcmlhbGl6ZWQgd2l0aCByZXNwZWN0IHRvIG90aGVyIHJlYWRzIGFuZCB3cml0ZXMu
IElmIGEgcmVhZCgpIG9mIGZpbGUgZGF0YSBjYW4gYmUgcHJvdmVuIChieSBhbnkgbWVhbnMpIHRv
IG9jY3VyIGFmdGVyIGEgd3JpdGUoKSBvZiB0aGUgZGF0YSwgaXQgbXVzdCByZWZsZWN0IHRoYXQg
d3JpdGUoKSwgZXZlbiBpZiB0aGUgY2FsbHMgYXJlIG1hZGUgYnkgZGlmZmVyZW50IHByb2Nlc3Nl
cy4gQSBzaW1pbGFyIHJlcXVpcmVtZW50IGFwcGxpZXMgdG8gbXVsdGlwbGUgd3JpdGUgb3BlcmF0
aW9ucyB0byB0aGUgc2FtZSBmaWxlIHBvc2l0aW9uLiBUaGlzIGlzIG5lZWRlZCB0byBndWFyYW50
ZWUgdGhlIHByb3BhZ2F0aW9uIG9mIGRhdGEgZnJvbSB3cml0ZSgpIGNhbGxzIHRvIHN1YnNlcXVl
bnQgcmVhZCgpIGNhbGxzLiBUaGlzIHJlcXVpcmVtZW50IGlzIHBhcnRpY3VsYXJseSBzaWduaWZp
Y2FudCBmb3IgbmV0d29ya2VkIGZpbGUgc3lzdGVtcywgd2hlcmUgc29tZSBjYWNoaW5nIHNjaGVt
ZXMgdmlvbGF0ZSB0aGVzZSBzZW1hbnRpY3Mu4oCdDQoNCg==


^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
  2016-06-15 15:45                                   ` Trond Myklebust
@ 2016-06-16  9:12                                     ` Christoph Hellwig
  -1 siblings, 0 replies; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-16  9:12 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: Christoph Hellwig, linux-nfs, xfs

On Wed, Jun 15, 2016 at 03:45:37PM +0000, Trond Myklebust wrote:
> Serialisation is not mandatory in POSIX:
> 
> http://pubs.opengroup.org/onlinepubs/9699919799/functions/write.html
> 
> “Writes can be serialized with respect to other reads and writes. If a read() of file data can be proven (by any means) to occur after a write() of the data, it must reflect that write(), even if the calls are made by different processes. A similar requirement applies to multiple write operations to the same file position. This is needed to guarantee the propagation of data from write() calls to subsequent read() calls. This requirement is particularly significant for networked file systems, where some caching schemes violate these semantics.”

That is the basic definition, but once O_DSYNC and friends come into
play it gets more complicated:

>From http://pubs.opengroup.org/onlinepubs/9699919799/functions/read.html:

    [SIO] [Option Start] If the O_DSYNC and O_RSYNC bits have been set,
    read I/O operations on the file descriptor shall complete as defined
    by synchronized I/O data integrity completion. If the O_SYNC and
    O_RSYNC bits have been set, read I/O operations on the file descriptor
    shall complete as defined by synchronized I/O file integrity completion.
    [Option End]

Which directs to:
http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html:

     3.378 Synchronized I/O Data Integrity Completion

     For read, when the operation has been completed or diagnosed if
     unsuccessful. The read is complete only when an image of the data
     has been successfully transferred to the requesting process. If
     there were any pending write requests affecting the data to be read
     at the time that the synchronized read operation was requested,
     these write requests are successfully transferred prior to reading
     the data.

     For write, when the operation has been completed or diagnosed if
     unsuccessful. The write is complete only when the data specified in
     the write request is successfully transferred and all file system
     information required to retrieve the data is successfully
     transferred.

     File attributes that are not necessary for data retrieval (access
     time, modification time, status change time) need not be
     successfully transferred prior to returning to the calling process.

While we'll never see O_RSYNC in the kernel glibc treats it as just
O_SYNC.  Either way - I'd be much happier if we could come up with
less different ways to do read/write exclusion rather than more..

_______________________________________________
xfs mailing list
xfs@oss.sgi.com
http://oss.sgi.com/mailman/listinfo/xfs

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes
@ 2016-06-16  9:12                                     ` Christoph Hellwig
  0 siblings, 0 replies; 39+ messages in thread
From: Christoph Hellwig @ 2016-06-16  9:12 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: Christoph Hellwig, linux-nfs, xfs

On Wed, Jun 15, 2016 at 03:45:37PM +0000, Trond Myklebust wrote:
> Serialisation is not mandatory in POSIX:
> 
> http://pubs.opengroup.org/onlinepubs/9699919799/functions/write.html
> 
> “Writes can be serialized with respect to other reads and writes. If a read() of file data can be proven (by any means) to occur after a write() of the data, it must reflect that write(), even if the calls are made by different processes. A similar requirement applies to multiple write operations to the same file position. This is needed to guarantee the propagation of data from write() calls to subsequent read() calls. This requirement is particularly significant for networked file systems, where some caching schemes violate these semantics.”

That is the basic definition, but once O_DSYNC and friends come into
play it gets more complicated:

^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 03/12] NFS: Cache aggressively when file is open for writing
  2016-06-14 19:05   ` [PATCH 03/12] NFS: Cache aggressively when file is open for writing Trond Myklebust
  2016-06-14 19:05     ` [PATCH 04/12] NFS: Kill NFS_INO_NFS_INO_FLUSHING: it is a performance killer Trond Myklebust
@ 2016-06-17  1:11     ` Oleg Drokin
  2016-06-17 14:01       ` Trond Myklebust
  1 sibling, 1 reply; 39+ messages in thread
From: Oleg Drokin @ 2016-06-17  1:11 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: linux-nfs

I get almost-insta-crash with this patch (really, just testing
the testing branch, but in the code this patch introduced).
-rc2 did not have this sort of crash, so it must be a new one.

On Jun 14, 2016, at 3:05 PM, Trond Myklebust wrote:

> Unless the user is using file locking, we must assume close-to-open
> cache consistency when the file is open for writing. Adjust the
> caching algorithm so that it does not clear the cache on out-of-order
> writes and/or attribute revalidations.
> 
> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
> ---
> fs/nfs/file.c  | 13 ++-----------
> fs/nfs/inode.c | 56 +++++++++++++++++++++++++++++++++++++++-----------------
> 2 files changed, 41 insertions(+), 28 deletions(-)
> 
> diff --git a/fs/nfs/file.c b/fs/nfs/file.c
> index 717a8d6af52d..2d39d9f9da7d 100644
> --- a/fs/nfs/file.c
> +++ b/fs/nfs/file.c
> @@ -780,11 +780,6 @@ do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
> }
> 
> static int
> -is_time_granular(struct timespec *ts) {
> -	return ((ts->tv_sec == 0) && (ts->tv_nsec <= 1000));
> -}
> -
> -static int
> do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
> {
> 	struct inode *inode = filp->f_mapping->host;
> @@ -817,12 +812,8 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
> 	 * This makes locking act as a cache coherency point.
> 	 */
> 	nfs_sync_mapping(filp->f_mapping);
> -	if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
> -		if (is_time_granular(&NFS_SERVER(inode)->time_delta))
> -			__nfs_revalidate_inode(NFS_SERVER(inode), inode);
> -		else
> -			nfs_zap_caches(inode);
> -	}
> +	if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
> +		nfs_zap_mapping(inode, filp->f_mapping);
> out:
> 	return status;
> }
> diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
> index 60051e62d3f1..8a808d25dbc8 100644
> --- a/fs/nfs/inode.c
> +++ b/fs/nfs/inode.c
> @@ -878,7 +878,10 @@ void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
> 	struct nfs_inode *nfsi = NFS_I(inode);
> 
> 	spin_lock(&inode->i_lock);
> -	list_add(&ctx->list, &nfsi->open_files);
> +	if (ctx->mode & FMODE_WRITE)
> +		list_add(&ctx->list, &nfsi->open_files);
> +	else
> +		list_add_tail(&ctx->list, &nfsi->open_files);
> 	spin_unlock(&inode->i_lock);
> }
> EXPORT_SYMBOL_GPL(nfs_inode_attach_open_context);
> @@ -1215,6 +1218,21 @@ int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *
> 	return __nfs_revalidate_mapping(inode, mapping, true);
> }
> 
> +static bool nfs_file_has_writers(struct nfs_inode *nfsi)
> +{
> +	assert_spin_locked(&nfsi->vfs_inode.i_lock);
> +
> +	if (list_empty(&nfsi->open_files))
> +		return false;
> +	/* Note: This relies on nfsi->open_files being ordered with writers
> +	 *       being placed at the head of the list.
> +	 *       See nfs_inode_attach_open_context()
> +	 */
> +	return (list_first_entry(&nfsi->open_files,
> +			struct nfs_open_context,
> +			list)->mode & FMODE_WRITE) == FMODE_WRITE;
> +}
> +
> static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
> {
> 	struct nfs_inode *nfsi = NFS_I(inode);
> @@ -1279,22 +1297,24 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
> 	if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
> 		return -EIO;
> 
> -	if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
> -			inode->i_version != fattr->change_attr)
> -		invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
> +	if (!nfs_file_has_writers(nfsi)) {

The crash address points here ^^:
gdb> l *(nfs_refresh_inode_locked+0x249)
0xffffffff81382c39 is in nfs_refresh_inode_locked (/home/green/bk/linux-test/fs/nfs/inode.c:1301).
1301		if (!nfs_file_has_writers(nfsi)) {

Did some racing thread just kill the inode under this thread I wonder?

[  264.739757] BUG: unable to handle kernel paging request at ffff88009875ffe8
[  264.740468] IP: [<ffffffff81382c39>] nfs_refresh_inode_locked+0x249/0x4e0
[  264.741080] PGD 3580067 PUD 3583067 PMD bce56067 PTE 800000009875f060
[  264.741836] Oops: 0000 [#1] SMP DEBUG_PAGEALLOC
[  264.742357] Modules linked in: loop rpcsec_gss_krb5 acpi_cpufreq tpm_tis joydev tpm virtio_console pcspkr i2c_piix4 nfsd ttm drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops drm floppy serio_raw
[  264.785009] CPU: 7 PID: 34494 Comm: ls Not tainted 4.7.0-rc3-vm-nfs+ #2
[  264.785559] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.8.2-20150714_191134- 04/01/2014
[  264.786545] task: ffff88009f78cac0 ti: ffff88009f780000 task.ti: ffff88009f780000
[  264.787596] RIP: 0010:[<ffffffff81382c39>]  [<ffffffff81382c39>] nfs_refresh_inode_locked+0x249/0x4e0
[  264.788628] RSP: 0018:ffff88009f783b58  EFLAGS: 00010246
[  264.789151] RAX: 0000000000000000 RBX: ffff8800aa48b348 RCX: ffff880098760000
[  264.789708] RDX: 0000000000027e7f RSI: 0000000000000001 RDI: ffff8800aa48b348
[  264.792533] RBP: ffff88009f783b70 R08: 0000000000000000 R09: 0000000000000001
[  264.793090] R10: ffff8800aa48b3e8 R11: 0000000000000000 R12: ffff8800aa48b348
[  264.793647] R13: ffff880093f8cf00 R14: ffff880089eb10c0 R15: ffff88009f783de0
[  264.794204] FS:  00007fefee488800(0000) GS:ffff8800b9000000(0000) knlGS:0000000000000000
[  264.795154] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  264.795686] CR2: ffff88009875ffe8 CR3: 000000009f3ab000 CR4: 00000000000006e0
[  264.796243] Stack:
[  264.796708]  ffff8800aa48b3d0 ffff8800aa48b348 ffff880093f8cf00 ffff88009f783b98
[  264.797853]  ffffffff81382efe ffff8800aa48b348 ffff88009f783c38 0000000000000000
[  264.798996]  ffff88009f783ba8 ffffffff81382f39 ffff88009f783c18 ffffffff81396fe3
[  264.824881] Call Trace:
[  264.825366]  [<ffffffff81382efe>] nfs_refresh_inode.part.24+0x2e/0x50
[  264.825908]  [<ffffffff81382f39>] nfs_refresh_inode+0x19/0x20
[  264.826441]  [<ffffffff81396fe3>] nfs3_proc_access+0xd3/0x190
[  264.826971]  [<ffffffff8137c66c>] nfs_do_access+0x3ec/0x620
[  264.837497]  [<ffffffff8137c2d1>] ? nfs_do_access+0x51/0x620
[  264.838029]  [<ffffffff818507ea>] ? generic_lookup_cred+0x1a/0x20
[  264.838585]  [<ffffffff8184ef7e>] ? rpcauth_lookupcred+0x8e/0xd0
[  264.839120]  [<ffffffff8137cb69>] nfs_permission+0x289/0x2e0
[  264.839667]  [<ffffffff8137c943>] ? nfs_permission+0x63/0x2e0
[  264.840201]  [<ffffffff812768da>] __inode_permission+0x6a/0xb0
[  264.850580]  [<ffffffff81276934>] inode_permission+0x14/0x50
[  264.851113]  [<ffffffff81276a8c>] may_open+0x8c/0x100
[  264.851635]  [<ffffffff8127bdc6>] path_openat+0x596/0xc20
[  264.852160]  [<ffffffff8127d9b1>] do_filp_open+0x91/0x100
[  264.852689]  [<ffffffff8188a557>] ? _raw_spin_unlock+0x27/0x40
[  264.853223]  [<ffffffff8128f010>] ? __alloc_fd+0x100/0x200
[  264.861053]  [<ffffffff81269f90>] do_sys_open+0x130/0x220
[  264.861582]  [<ffffffff8126a09e>] SyS_open+0x1e/0x20
[  264.862101]  [<ffffffff8188aebc>] entry_SYSCALL_64_fastpath+0x1f/0xbd

> +		/* Verify a few of the more important attributes */
> +		if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && inode->i_version != fattr->change_attr)
> +			invalid |= NFS_INO_INVALID_ATTR | NFS_INO_REVAL_PAGECACHE;
> 
> -	/* Verify a few of the more important attributes */
> -	if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
> -		invalid |= NFS_INO_INVALID_ATTR;
> +		if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
> +			invalid |= NFS_INO_INVALID_ATTR;
> 
> -	if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
> -		cur_size = i_size_read(inode);
> -		new_isize = nfs_size_to_loff_t(fattr->size);
> -		if (cur_size != new_isize)
> -			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
> +		if ((fattr->valid & NFS_ATTR_FATTR_CTIME) && !timespec_equal(&inode->i_ctime, &fattr->ctime))
> +			invalid |= NFS_INO_INVALID_ATTR;
> +
> +		if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
> +			cur_size = i_size_read(inode);
> +			new_isize = nfs_size_to_loff_t(fattr->size);
> +			if (cur_size != new_isize)
> +				invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
> +		}
> 	}
> -	if (nfsi->nrequests != 0)
> -		invalid &= ~NFS_INO_REVAL_PAGECACHE;
> 
> 	/* Have any file permissions changed? */
> 	if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
> @@ -1675,6 +1695,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
> 	unsigned long invalid = 0;
> 	unsigned long now = jiffies;
> 	unsigned long save_cache_validity;
> +	bool have_writers = nfs_file_has_writers(nfsi);
> 	bool cache_revalidated = true;
> 
> 	dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
> @@ -1730,7 +1751,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
> 			dprintk("NFS: change_attr change on server for file %s/%ld\n",
> 					inode->i_sb->s_id, inode->i_ino);
> 			/* Could it be a race with writeback? */
> -			if (nfsi->nrequests == 0) {
> +			if (!have_writers) {
> 				invalid |= NFS_INO_INVALID_ATTR
> 					| NFS_INO_INVALID_DATA
> 					| NFS_INO_INVALID_ACCESS
> @@ -1770,9 +1791,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
> 		if (new_isize != cur_isize) {
> 			/* Do we perhaps have any outstanding writes, or has
> 			 * the file grown beyond our last write? */
> -			if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
> +			if (nfsi->nrequests == 0 || new_isize > cur_isize) {
> 				i_size_write(inode, new_isize);
> -				invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
> +				if (!have_writers)
> +					invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
> 			}
> 			dprintk("NFS: isize change on server for file %s/%ld "
> 					"(%Ld to %Ld)\n",
> -- 
> 2.5.5
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html


^ permalink raw reply	[flat|nested] 39+ messages in thread

* Re: [PATCH 03/12] NFS: Cache aggressively when file is open for writing
  2016-06-17  1:11     ` [PATCH 03/12] NFS: Cache aggressively when file is open for writing Oleg Drokin
@ 2016-06-17 14:01       ` Trond Myklebust
  0 siblings, 0 replies; 39+ messages in thread
From: Trond Myklebust @ 2016-06-17 14:01 UTC (permalink / raw)
  To: Oleg Drokin; +Cc: linux-nfs

DQoNCk9uIDYvMTYvMTYsIDIxOjExLCAiT2xlZyBEcm9raW4iIDxncmVlbkBsaW51eGhhY2tlci5y
dT4gd3JvdGU6DQoNCkkgZ2V0IGFsbW9zdC1pbnN0YS1jcmFzaCB3aXRoIHRoaXMgcGF0Y2ggKHJl
YWxseSwganVzdCB0ZXN0aW5nDQp0aGUgdGVzdGluZyBicmFuY2gsIGJ1dCBpbiB0aGUgY29kZSB0
aGlzIHBhdGNoIGludHJvZHVjZWQpLg0KLXJjMiBkaWQgbm90IGhhdmUgdGhpcyBzb3J0IG9mIGNy
YXNoLCBzbyBpdCBtdXN0IGJlIGEgbmV3IG9uZS4NCg0KT24gSnVuIDE0LCAyMDE2LCBhdCAzOjA1
IFBNLCBUcm9uZCBNeWtsZWJ1c3Qgd3JvdGU6DQoNCj4gVW5sZXNzIHRoZSB1c2VyIGlzIHVzaW5n
IGZpbGUgbG9ja2luZywgd2UgbXVzdCBhc3N1bWUgY2xvc2UtdG8tb3Blbg0KPiBjYWNoZSBjb25z
aXN0ZW5jeSB3aGVuIHRoZSBmaWxlIGlzIG9wZW4gZm9yIHdyaXRpbmcuIEFkanVzdCB0aGUNCj4g
Y2FjaGluZyBhbGdvcml0aG0gc28gdGhhdCBpdCBkb2VzIG5vdCBjbGVhciB0aGUgY2FjaGUgb24g
b3V0LW9mLW9yZGVyDQo+IHdyaXRlcyBhbmQvb3IgYXR0cmlidXRlIHJldmFsaWRhdGlvbnMuDQo+
IA0KPiBTaWduZWQtb2ZmLWJ5OiBUcm9uZCBNeWtsZWJ1c3QgPHRyb25kLm15a2xlYnVzdEBwcmlt
YXJ5ZGF0YS5jb20+DQo+IC0tLQ0KPiBmcy9uZnMvZmlsZS5jICB8IDEzICsrLS0tLS0tLS0tLS0N
Cj4gZnMvbmZzL2lub2RlLmMgfCA1NiArKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysr
KysrKystLS0tLS0tLS0tLS0tLS0tLQ0KPiAyIGZpbGVzIGNoYW5nZWQsIDQxIGluc2VydGlvbnMo
KyksIDI4IGRlbGV0aW9ucygtKQ0KPiANCj4gZGlmZiAtLWdpdCBhL2ZzL25mcy9maWxlLmMgYi9m
cy9uZnMvZmlsZS5jDQo+IGluZGV4IDcxN2E4ZDZhZjUyZC4uMmQzOWQ5ZjlkYTdkIDEwMDY0NA0K
PiAtLS0gYS9mcy9uZnMvZmlsZS5jDQo+ICsrKyBiL2ZzL25mcy9maWxlLmMNCj4gQEAgLTc4MCwx
MSArNzgwLDYgQEAgZG9fdW5sayhzdHJ1Y3QgZmlsZSAqZmlscCwgaW50IGNtZCwgc3RydWN0IGZp
bGVfbG9jayAqZmwsIGludCBpc19sb2NhbCkNCj4gfQ0KPiANCj4gc3RhdGljIGludA0KPiAtaXNf
dGltZV9ncmFudWxhcihzdHJ1Y3QgdGltZXNwZWMgKnRzKSB7DQo+IC0JcmV0dXJuICgodHMtPnR2
X3NlYyA9PSAwKSAmJiAodHMtPnR2X25zZWMgPD0gMTAwMCkpOw0KPiAtfQ0KPiAtDQo+IC1zdGF0
aWMgaW50DQo+IGRvX3NldGxrKHN0cnVjdCBmaWxlICpmaWxwLCBpbnQgY21kLCBzdHJ1Y3QgZmls
ZV9sb2NrICpmbCwgaW50IGlzX2xvY2FsKQ0KPiB7DQo+IAlzdHJ1Y3QgaW5vZGUgKmlub2RlID0g
ZmlscC0+Zl9tYXBwaW5nLT5ob3N0Ow0KPiBAQCAtODE3LDEyICs4MTIsOCBAQCBkb19zZXRsayhz
dHJ1Y3QgZmlsZSAqZmlscCwgaW50IGNtZCwgc3RydWN0IGZpbGVfbG9jayAqZmwsIGludCBpc19s
b2NhbCkNCj4gCSAqIFRoaXMgbWFrZXMgbG9ja2luZyBhY3QgYXMgYSBjYWNoZSBjb2hlcmVuY3kg
cG9pbnQuDQo+IAkgKi8NCj4gCW5mc19zeW5jX21hcHBpbmcoZmlscC0+Zl9tYXBwaW5nKTsNCj4g
LQlpZiAoIU5GU19QUk9UTyhpbm9kZSktPmhhdmVfZGVsZWdhdGlvbihpbm9kZSwgRk1PREVfUkVB
RCkpIHsNCj4gLQkJaWYgKGlzX3RpbWVfZ3JhbnVsYXIoJk5GU19TRVJWRVIoaW5vZGUpLT50aW1l
X2RlbHRhKSkNCj4gLQkJCV9fbmZzX3JldmFsaWRhdGVfaW5vZGUoTkZTX1NFUlZFUihpbm9kZSks
IGlub2RlKTsNCj4gLQkJZWxzZQ0KPiAtCQkJbmZzX3phcF9jYWNoZXMoaW5vZGUpOw0KPiAtCX0N
Cj4gKwlpZiAoIU5GU19QUk9UTyhpbm9kZSktPmhhdmVfZGVsZWdhdGlvbihpbm9kZSwgRk1PREVf
UkVBRCkpDQo+ICsJCW5mc196YXBfbWFwcGluZyhpbm9kZSwgZmlscC0+Zl9tYXBwaW5nKTsNCj4g
b3V0Og0KPiAJcmV0dXJuIHN0YXR1czsNCj4gfQ0KPiBkaWZmIC0tZ2l0IGEvZnMvbmZzL2lub2Rl
LmMgYi9mcy9uZnMvaW5vZGUuYw0KPiBpbmRleCA2MDA1MWU2MmQzZjEuLjhhODA4ZDI1ZGJjOCAx
MDA2NDQNCj4gLS0tIGEvZnMvbmZzL2lub2RlLmMNCj4gKysrIGIvZnMvbmZzL2lub2RlLmMNCj4g
QEAgLTg3OCw3ICs4NzgsMTAgQEAgdm9pZCBuZnNfaW5vZGVfYXR0YWNoX29wZW5fY29udGV4dChz
dHJ1Y3QgbmZzX29wZW5fY29udGV4dCAqY3R4KQ0KPiAJc3RydWN0IG5mc19pbm9kZSAqbmZzaSA9
IE5GU19JKGlub2RlKTsNCj4gDQo+IAlzcGluX2xvY2soJmlub2RlLT5pX2xvY2spOw0KPiAtCWxp
c3RfYWRkKCZjdHgtPmxpc3QsICZuZnNpLT5vcGVuX2ZpbGVzKTsNCj4gKwlpZiAoY3R4LT5tb2Rl
ICYgRk1PREVfV1JJVEUpDQo+ICsJCWxpc3RfYWRkKCZjdHgtPmxpc3QsICZuZnNpLT5vcGVuX2Zp
bGVzKTsNCj4gKwllbHNlDQo+ICsJCWxpc3RfYWRkX3RhaWwoJmN0eC0+bGlzdCwgJm5mc2ktPm9w
ZW5fZmlsZXMpOw0KPiAJc3Bpbl91bmxvY2soJmlub2RlLT5pX2xvY2spOw0KPiB9DQo+IEVYUE9S
VF9TWU1CT0xfR1BMKG5mc19pbm9kZV9hdHRhY2hfb3Blbl9jb250ZXh0KTsNCj4gQEAgLTEyMTUs
NiArMTIxOCwyMSBAQCBpbnQgbmZzX3JldmFsaWRhdGVfbWFwcGluZ19wcm90ZWN0ZWQoc3RydWN0
IGlub2RlICppbm9kZSwgc3RydWN0IGFkZHJlc3Nfc3BhY2UgKg0KPiAJcmV0dXJuIF9fbmZzX3Jl
dmFsaWRhdGVfbWFwcGluZyhpbm9kZSwgbWFwcGluZywgdHJ1ZSk7DQo+IH0NCj4gDQo+ICtzdGF0
aWMgYm9vbCBuZnNfZmlsZV9oYXNfd3JpdGVycyhzdHJ1Y3QgbmZzX2lub2RlICpuZnNpKQ0KPiAr
ew0KPiArCWFzc2VydF9zcGluX2xvY2tlZCgmbmZzaS0+dmZzX2lub2RlLmlfbG9jayk7DQo+ICsN
Cj4gKwlpZiAobGlzdF9lbXB0eSgmbmZzaS0+b3Blbl9maWxlcykpDQo+ICsJCXJldHVybiBmYWxz
ZTsNCj4gKwkvKiBOb3RlOiBUaGlzIHJlbGllcyBvbiBuZnNpLT5vcGVuX2ZpbGVzIGJlaW5nIG9y
ZGVyZWQgd2l0aCB3cml0ZXJzDQo+ICsJICogICAgICAgYmVpbmcgcGxhY2VkIGF0IHRoZSBoZWFk
IG9mIHRoZSBsaXN0Lg0KPiArCSAqICAgICAgIFNlZSBuZnNfaW5vZGVfYXR0YWNoX29wZW5fY29u
dGV4dCgpDQo+ICsJICovDQo+ICsJcmV0dXJuIChsaXN0X2ZpcnN0X2VudHJ5KCZuZnNpLT5vcGVu
X2ZpbGVzLA0KPiArCQkJc3RydWN0IG5mc19vcGVuX2NvbnRleHQsDQo+ICsJCQlsaXN0KS0+bW9k
ZSAmIEZNT0RFX1dSSVRFKSA9PSBGTU9ERV9XUklURTsNCj4gK30NCj4gKw0KPiBzdGF0aWMgdW5z
aWduZWQgbG9uZyBuZnNfd2NjX3VwZGF0ZV9pbm9kZShzdHJ1Y3QgaW5vZGUgKmlub2RlLCBzdHJ1
Y3QgbmZzX2ZhdHRyICpmYXR0cikNCj4gew0KPiAJc3RydWN0IG5mc19pbm9kZSAqbmZzaSA9IE5G
U19JKGlub2RlKTsNCj4gQEAgLTEyNzksMjIgKzEyOTcsMjQgQEAgc3RhdGljIGludCBuZnNfY2hl
Y2tfaW5vZGVfYXR0cmlidXRlcyhzdHJ1Y3QgaW5vZGUgKmlub2RlLCBzdHJ1Y3QgbmZzX2ZhdHRy
ICpmYXQNCj4gCWlmICgoZmF0dHItPnZhbGlkICYgTkZTX0FUVFJfRkFUVFJfVFlQRSkgJiYgKGlu
b2RlLT5pX21vZGUgJiBTX0lGTVQpICE9IChmYXR0ci0+bW9kZSAmIFNfSUZNVCkpDQo+IAkJcmV0
dXJuIC1FSU87DQo+IA0KPiAtCWlmICgoZmF0dHItPnZhbGlkICYgTkZTX0FUVFJfRkFUVFJfQ0hB
TkdFKSAhPSAwICYmDQo+IC0JCQlpbm9kZS0+aV92ZXJzaW9uICE9IGZhdHRyLT5jaGFuZ2VfYXR0
cikNCj4gLQkJaW52YWxpZCB8PSBORlNfSU5PX0lOVkFMSURfQVRUUnxORlNfSU5PX1JFVkFMX1BB
R0VDQUNIRTsNCj4gKwlpZiAoIW5mc19maWxlX2hhc193cml0ZXJzKG5mc2kpKSB7DQoNClRoZSBj
cmFzaCBhZGRyZXNzIHBvaW50cyBoZXJlIF5eOg0KZ2RiPiBsICoobmZzX3JlZnJlc2hfaW5vZGVf
bG9ja2VkKzB4MjQ5KQ0KMHhmZmZmZmZmZjgxMzgyYzM5IGlzIGluIG5mc19yZWZyZXNoX2lub2Rl
X2xvY2tlZCAoL2hvbWUvZ3JlZW4vYmsvbGludXgtdGVzdC9mcy9uZnMvaW5vZGUuYzoxMzAxKS4N
CjEzMDEJCWlmICghbmZzX2ZpbGVfaGFzX3dyaXRlcnMobmZzaSkpIHsNCg0KRGlkIHNvbWUgcmFj
aW5nIHRocmVhZCBqdXN0IGtpbGwgdGhlIGlub2RlIHVuZGVyIHRoaXMgdGhyZWFkIEkgd29uZGVy
Pw0KDQpbICAyNjQuNzM5NzU3XSBCVUc6IHVuYWJsZSB0byBoYW5kbGUga2VybmVsIHBhZ2luZyBy
ZXF1ZXN0IGF0IGZmZmY4ODAwOTg3NWZmZTgNClsgIDI2NC43NDA0NjhdIElQOiBbPGZmZmZmZmZm
ODEzODJjMzk+XSBuZnNfcmVmcmVzaF9pbm9kZV9sb2NrZWQrMHgyNDkvMHg0ZTANClsgIDI2NC43
NDEwODBdIFBHRCAzNTgwMDY3IFBVRCAzNTgzMDY3IFBNRCBiY2U1NjA2NyBQVEUgODAwMDAwMDA5
ODc1ZjA2MA0KWyAgMjY0Ljc0MTgzNl0gT29wczogMDAwMCBbIzFdIFNNUCBERUJVR19QQUdFQUxM
T0MNClsgIDI2NC43NDIzNTddIE1vZHVsZXMgbGlua2VkIGluOiBsb29wIHJwY3NlY19nc3Nfa3Ji
NSBhY3BpX2NwdWZyZXEgdHBtX3RpcyBqb3lkZXYgdHBtIHZpcnRpb19jb25zb2xlIHBjc3BrciBp
MmNfcGlpeDQgbmZzZCB0dG0gZHJtX2ttc19oZWxwZXIgc3lzY29weWFyZWEgc3lzZmlsbHJlY3Qg
c3lzaW1nYmx0IGZiX3N5c19mb3BzIGRybSBmbG9wcHkgc2VyaW9fcmF3DQpbICAyNjQuNzg1MDA5
XSBDUFU6IDcgUElEOiAzNDQ5NCBDb21tOiBscyBOb3QgdGFpbnRlZCA0LjcuMC1yYzMtdm0tbmZz
KyAjMg0KWyAgMjY0Ljc4NTU1OV0gSGFyZHdhcmUgbmFtZTogUUVNVSBTdGFuZGFyZCBQQyAoaTQ0
MEZYICsgUElJWCwgMTk5NiksIEJJT1MgMS44LjItMjAxNTA3MTRfMTkxMTM0LSAwNC8wMS8yMDE0
DQpbICAyNjQuNzg2NTQ1XSB0YXNrOiBmZmZmODgwMDlmNzhjYWMwIHRpOiBmZmZmODgwMDlmNzgw
MDAwIHRhc2sudGk6IGZmZmY4ODAwOWY3ODAwMDANClsgIDI2NC43ODc1OTZdIFJJUDogMDAxMDpb
PGZmZmZmZmZmODEzODJjMzk+XSAgWzxmZmZmZmZmZjgxMzgyYzM5Pl0gbmZzX3JlZnJlc2hfaW5v
ZGVfbG9ja2VkKzB4MjQ5LzB4NGUwDQpbICAyNjQuNzg4NjI4XSBSU1A6IDAwMTg6ZmZmZjg4MDA5
Zjc4M2I1OCAgRUZMQUdTOiAwMDAxMDI0Ng0KWyAgMjY0Ljc4OTE1MV0gUkFYOiAwMDAwMDAwMDAw
MDAwMDAwIFJCWDogZmZmZjg4MDBhYTQ4YjM0OCBSQ1g6IGZmZmY4ODAwOTg3NjAwMDANClsgIDI2
NC43ODk3MDhdIFJEWDogMDAwMDAwMDAwMDAyN2U3ZiBSU0k6IDAwMDAwMDAwMDAwMDAwMDEgUkRJ
OiBmZmZmODgwMGFhNDhiMzQ4DQpbICAyNjQuNzkyNTMzXSBSQlA6IGZmZmY4ODAwOWY3ODNiNzAg
UjA4OiAwMDAwMDAwMDAwMDAwMDAwIFIwOTogMDAwMDAwMDAwMDAwMDAwMQ0KWyAgMjY0Ljc5MzA5
MF0gUjEwOiBmZmZmODgwMGFhNDhiM2U4IFIxMTogMDAwMDAwMDAwMDAwMDAwMCBSMTI6IGZmZmY4
ODAwYWE0OGIzNDgNClsgIDI2NC43OTM2NDddIFIxMzogZmZmZjg4MDA5M2Y4Y2YwMCBSMTQ6IGZm
ZmY4ODAwODllYjEwYzAgUjE1OiBmZmZmODgwMDlmNzgzZGUwDQpbICAyNjQuNzk0MjA0XSBGUzog
IDAwMDA3ZmVmZWU0ODg4MDAoMDAwMCkgR1M6ZmZmZjg4MDBiOTAwMDAwMCgwMDAwKSBrbmxHUzow
MDAwMDAwMDAwMDAwMDAwDQpbICAyNjQuNzk1MTU0XSBDUzogIDAwMTAgRFM6IDAwMDAgRVM6IDAw
MDAgQ1IwOiAwMDAwMDAwMDgwMDUwMDMzDQpbICAyNjQuNzk1Njg2XSBDUjI6IGZmZmY4ODAwOTg3
NWZmZTggQ1IzOiAwMDAwMDAwMDlmM2FiMDAwIENSNDogMDAwMDAwMDAwMDAwMDZlMA0KWyAgMjY0
Ljc5NjI0M10gU3RhY2s6DQpbICAyNjQuNzk2NzA4XSAgZmZmZjg4MDBhYTQ4YjNkMCBmZmZmODgw
MGFhNDhiMzQ4IGZmZmY4ODAwOTNmOGNmMDAgZmZmZjg4MDA5Zjc4M2I5OA0KWyAgMjY0Ljc5Nzg1
M10gIGZmZmZmZmZmODEzODJlZmUgZmZmZjg4MDBhYTQ4YjM0OCBmZmZmODgwMDlmNzgzYzM4IDAw
MDAwMDAwMDAwMDAwMDANClsgIDI2NC43OTg5OTZdICBmZmZmODgwMDlmNzgzYmE4IGZmZmZmZmZm
ODEzODJmMzkgZmZmZjg4MDA5Zjc4M2MxOCBmZmZmZmZmZjgxMzk2ZmUzDQpbICAyNjQuODI0ODgx
XSBDYWxsIFRyYWNlOg0KWyAgMjY0LjgyNTM2Nl0gIFs8ZmZmZmZmZmY4MTM4MmVmZT5dIG5mc19y
ZWZyZXNoX2lub2RlLnBhcnQuMjQrMHgyZS8weDUwDQpbICAyNjQuODI1OTA4XSAgWzxmZmZmZmZm
ZjgxMzgyZjM5Pl0gbmZzX3JlZnJlc2hfaW5vZGUrMHgxOS8weDIwDQpbICAyNjQuODI2NDQxXSAg
WzxmZmZmZmZmZjgxMzk2ZmUzPl0gbmZzM19wcm9jX2FjY2VzcysweGQzLzB4MTkwDQpbICAyNjQu
ODI2OTcxXSAgWzxmZmZmZmZmZjgxMzdjNjZjPl0gbmZzX2RvX2FjY2VzcysweDNlYy8weDYyMA0K
WyAgMjY0LjgzNzQ5N10gIFs8ZmZmZmZmZmY4MTM3YzJkMT5dID8gbmZzX2RvX2FjY2VzcysweDUx
LzB4NjIwDQoNCkkgc3VzcGVjdCB0aGlzIGlzIHRoZSBidWcgdGhhdCB0aGUgYXV0b21hdGVkIHRl
c3RpbmcgYWxzbyBmb3VuZCB3aXRoIHRoZSBuZnNfcmV2YWxpZGF0ZV9pbm9kZSgpIGdldHRpbmcg
Y2FsbGVkIGFzIHBhcnQgb2YgdGhlIFJDVSBwYXRoLiBJIGhhdmUgYSBuZXcgcGF0Y2ggc2VyaWVz
IHRoYXQgZml4ZXMgdGhhdCBpc3N1ZSBhbmQgdGhhdCBJ4oCZbGwgcHVibGlzaCBzb29uIChqdXN0
IG5lZWQgdG8gZmlndXJlIG91dCB3aGF0IHRvIGRvIGFib3V0IHRoZSBsYXN0IGZldyBjb21tZW50
cyBmcm9tIENocmlzdG9waOKAmXMgcmV2aWV3KS4NCg0KQ2hlZXJzDQogIFRyb25kDQoNCg==


^ permalink raw reply	[flat|nested] 39+ messages in thread

end of thread, other threads:[~2016-06-17 14:01 UTC | newest]

Thread overview: 39+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-06-14 19:05 [PATCH 01/12] NFS: Don't flush caches for a getattr that races with writeback Trond Myklebust
2016-06-14 19:05 ` [PATCH 02/12] NFS: Cache access checks more aggressively Trond Myklebust
2016-06-14 19:05   ` [PATCH 03/12] NFS: Cache aggressively when file is open for writing Trond Myklebust
2016-06-14 19:05     ` [PATCH 04/12] NFS: Kill NFS_INO_NFS_INO_FLUSHING: it is a performance killer Trond Myklebust
2016-06-14 19:05       ` [PATCH 05/12] NFS: writepage of a single page should not be synchronous Trond Myklebust
2016-06-14 19:05         ` [PATCH 06/12] NFS: Don't hold the inode lock across fsync() Trond Myklebust
2016-06-14 19:05           ` [PATCH 07/12] NFS: Don't enable deep stack recursion when doing memory reclaim Trond Myklebust
2016-06-14 19:05             ` [PATCH 08/12] NFS: Fix O_DIRECT verifier problems Trond Myklebust
2016-06-14 19:05               ` [PATCH 09/12] NFS: Ensure we reset the write verifier 'committed' value on resend Trond Myklebust
2016-06-14 19:05                 ` [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes Trond Myklebust
2016-06-14 19:05                   ` [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count Trond Myklebust
2016-06-14 19:05                     ` [PATCH 12/12] NFS: Clean up nfs_direct_complete() Trond Myklebust
2016-06-15  7:16                     ` [PATCH 11/12] NFS: Don't count O_DIRECT reads in the inode->i_dio_count Christoph Hellwig
2016-06-15 14:36                       ` Trond Myklebust
2016-06-15 14:41                         ` Christoph Hellwig
2016-06-15 14:50                           ` Trond Myklebust
2016-06-15 14:53                             ` Christoph Hellwig
2016-06-15  7:13                   ` [PATCH 10/12] NFS: Do not serialise O_DIRECT reads and writes Christoph Hellwig
2016-06-15 14:29                     ` Trond Myklebust
2016-06-15 14:48                       ` Christoph Hellwig
2016-06-15 14:48                         ` Christoph Hellwig
2016-06-15 14:52                         ` Trond Myklebust
2016-06-15 14:52                           ` Trond Myklebust
2016-06-15 14:56                           ` Christoph Hellwig
2016-06-15 14:56                             ` Christoph Hellwig
2016-06-15 15:09                             ` Trond Myklebust
2016-06-15 15:09                               ` Trond Myklebust
2016-06-15 15:14                               ` Christoph Hellwig
2016-06-15 15:14                                 ` Christoph Hellwig
2016-06-15 15:45                                 ` Trond Myklebust
2016-06-15 15:45                                   ` Trond Myklebust
2016-06-16  9:12                                   ` Christoph Hellwig
2016-06-16  9:12                                     ` Christoph Hellwig
2016-06-15  7:09             ` [PATCH 07/12] NFS: Don't enable deep stack recursion when doing memory reclaim Christoph Hellwig
2016-06-15  7:08           ` [PATCH 06/12] NFS: Don't hold the inode lock across fsync() Christoph Hellwig
2016-06-15 14:47             ` Trond Myklebust
2016-06-15 14:54               ` Christoph Hellwig
2016-06-17  1:11     ` [PATCH 03/12] NFS: Cache aggressively when file is open for writing Oleg Drokin
2016-06-17 14:01       ` Trond Myklebust

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.