* [PATCH 0/6] Cache deferral improvements - try N+2
@ 2010-08-12  7:04 NeilBrown
  2010-08-12  7:04 ` [PATCH 1/6] sunrpc/cache: allow threads to block while waiting for cache update NeilBrown
                   ` (7 more replies)
  0 siblings, 8 replies; 19+ messages in thread
From: NeilBrown @ 2010-08-12  7:04 UTC (permalink / raw)
  To: J. Bruce Fields; +Cc: linux-nfs

I don't think it is quite 6 months since I last posted these, but I've
definitely dropped the ball... so here they are again.

This set allows kernel threads to wait a short while for an upcall (to
get auth info) to complete rather than having to queue and retry the
whole request.
It also changes NFSv4 to never use the old deferral scheme at all.

The last two patches are just little clean-ups that I found while
wandering around the code.

I think I have addressed all the concerns raised previously, e.g.
  http://www.spinics.net/lists/linux-nfs/msg12253.html

Note that there is a conflict between this patch set and the other
one.  The other changes something in nfs4idmap.c, and this one removes
all the code that was changed.  Very easy to resolve.

Thanks,
NeilBrown

---

NeilBrown (6):
      sunrpc/cache: allow threads to block while waiting for cache update.
      nfsd/idmap: drop special request deferral in favour of improved default.
      sunrpc: close connection when a request is irretrievably lost.
      nfsd: disable deferral for NFSv4
      svcauth_gss: replace a trivial 'switch' with an 'if'
      sunrpc/cache: change deferred-request hash table to use hlist.


 fs/nfsd/nfs4idmap.c               |  105 ++++---------------------------------
 fs/nfsd/nfs4proc.c                |    4 +
 include/linux/sunrpc/cache.h      |    5 +-
 include/linux/sunrpc/svcauth.h    |   10 ++--
 net/sunrpc/auth_gss/svcauth_gss.c |   51 ++++++++----------
 net/sunrpc/cache.c                |   71 +++++++++++++++++++------
 net/sunrpc/svc.c                  |    3 +
 net/sunrpc/svc_xprt.c             |   11 ++++
 net/sunrpc/svcauth_unix.c         |   11 +++-
 9 files changed, 125 insertions(+), 146 deletions(-)

-- 
Signature



* [PATCH 2/6] nfsd/idmap: drop special request deferral in favour of improved default.
  2010-08-12  7:04 [PATCH 0/6] Cache deferral improvements - try N+2 NeilBrown
  2010-08-12  7:04 ` [PATCH 1/6] sunrpc/cache: allow threads to block while waiting for cache update NeilBrown
@ 2010-08-12  7:04 ` NeilBrown
       [not found]   ` <20100812070406.11459.89468.stgit-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
  2010-08-12  7:04 ` [PATCH 3/6] sunrpc: close connection when a request is irretrievably lost NeilBrown
                   ` (5 subsequent siblings)
  7 siblings, 1 reply; 19+ messages in thread
From: NeilBrown @ 2010-08-12  7:04 UTC (permalink / raw)
  To: J. Bruce Fields; +Cc: linux-nfs

The idmap code manages request deferral by waiting for a reply from
userspace rather than putting the NFS request on a queue to be retried
from the start.
Now that the common deferral code does this, there is no need for the
special code in idmap.

Signed-off-by: NeilBrown <neilb@suse.de>
---
 fs/nfsd/nfs4idmap.c |  105 +++++----------------------------------------------
 1 files changed, 11 insertions(+), 94 deletions(-)

diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index c78dbf4..f0695e8 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -482,109 +482,26 @@ nfsd_idmap_shutdown(void)
 	cache_unregister(&nametoid_cache);
 }
 
-/*
- * Deferred request handling
- */
-
-struct idmap_defer_req {
-       struct cache_req		req;
-       struct cache_deferred_req deferred_req;
-       wait_queue_head_t	waitq;
-       atomic_t			count;
-};
-
-static inline void
-put_mdr(struct idmap_defer_req *mdr)
-{
-	if (atomic_dec_and_test(&mdr->count))
-		kfree(mdr);
-}
-
-static inline void
-get_mdr(struct idmap_defer_req *mdr)
-{
-	atomic_inc(&mdr->count);
-}
-
-static void
-idmap_revisit(struct cache_deferred_req *dreq, int toomany)
-{
-	struct idmap_defer_req *mdr =
-		container_of(dreq, struct idmap_defer_req, deferred_req);
-
-	wake_up(&mdr->waitq);
-	put_mdr(mdr);
-}
-
-static struct cache_deferred_req *
-idmap_defer(struct cache_req *req)
-{
-	struct idmap_defer_req *mdr =
-		container_of(req, struct idmap_defer_req, req);
-
-	mdr->deferred_req.revisit = idmap_revisit;
-	get_mdr(mdr);
-	return (&mdr->deferred_req);
-}
-
-static inline int
-do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key,
-		struct cache_detail *detail, struct ent **item,
-		struct idmap_defer_req *mdr)
-{
-	*item = lookup_fn(key);
-	if (!*item)
-		return -ENOMEM;
-	return cache_check(detail, &(*item)->h, &mdr->req);
-}
-
-static inline int
-do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *),
-			struct ent *key, struct cache_detail *detail,
-			struct ent **item)
-{
-	int ret = -ENOMEM;
-
-	*item = lookup_fn(key);
-	if (!*item)
-		goto out_err;
-	ret = -ETIMEDOUT;
-	if (!test_bit(CACHE_VALID, &(*item)->h.flags)
-			|| (*item)->h.expiry_time < get_seconds()
-			|| detail->flush_time > (*item)->h.last_refresh)
-		goto out_put;
-	ret = -ENOENT;
-	if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
-		goto out_put;
-	return 0;
-out_put:
-	cache_put(&(*item)->h, detail);
-out_err:
-	*item = NULL;
-	return ret;
-}
-
 static int
 idmap_lookup(struct svc_rqst *rqstp,
 		struct ent *(*lookup_fn)(struct ent *), struct ent *key,
 		struct cache_detail *detail, struct ent **item)
 {
-	struct idmap_defer_req *mdr;
 	int ret;
 
-	mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
-	if (!mdr)
+	*item = lookup_fn(key);
+	if (!*item)
 		return -ENOMEM;
-	atomic_set(&mdr->count, 1);
-	init_waitqueue_head(&mdr->waitq);
-	mdr->req.defer = idmap_defer;
-	ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr);
-	if (ret == -EAGAIN) {
-		wait_event_interruptible_timeout(mdr->waitq,
-			test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
-		ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item);
+ retry:
+	ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);
+
+	if (ret == -ETIMEDOUT) {
+		struct ent *prev_item = *item;
+		*item = lookup_fn(key);
+		if (*item != prev_item)
+			goto retry;
+		cache_put(&(*item)->h, detail);
 	}
-	put_mdr(mdr);
 	return ret;
 }
 




* [PATCH 1/6] sunrpc/cache: allow threads to block while waiting for cache update.
  2010-08-12  7:04 [PATCH 0/6] Cache deferral improvements - try N+2 NeilBrown
@ 2010-08-12  7:04 ` NeilBrown
  2010-08-12  7:04 ` [PATCH 2/6] nfsd/idmap: drop special request deferral in favour of improved default NeilBrown
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 19+ messages in thread
From: NeilBrown @ 2010-08-12  7:04 UTC (permalink / raw)
  To: J. Bruce Fields; +Cc: linux-nfs

The current practice of waiting for cache updates by queueing the
whole request to be retried has (at least) two problems.

1/ With NFSv4, requests can be quite complex and re-trying a whole
  request when a later part fails should only be a last resort, not a
  normal practice.

2/ Large requests, and in particular any 'write' request, will not be
  queued by the current code and doing so would be undesirable.

In many cases only a very short wait is needed before the cache gets
valid data.

So, provided the underlying transport permits it by setting
->thread_wait, arrange to wait briefly for an upcall to complete
(as reflected in the clearing of CACHE_PENDING).
If the short wait was not long enough and CACHE_PENDING is still set,
fall back on the old approach.

The 'thread_wait' value is set to 5 seconds when there are spare
threads, and 1 second when there are no spare threads.

These values are probably much higher than needed, but will ensure
some forward progress.

Note that as we only request an update for a non-valid item, and as
non-valid items are updated in place, it is extremely unlikely that
cache_check will return -ETIMEDOUT.  Normally cache_defer_req will
sleep for a short while and then find that the item is valid.
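
Condensed, the new path through cache_defer_req() looks like this (a
simplified sketch of the patch below, with the hashing and clean-up
details elided):

	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		/* wait in place on an on-stack completion */
		sleeper.completion =
			COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
		sleeper.handle.revisit = cache_restart_thread;
		dreq = &sleeper.handle;
	} else
		dreq = req->defer(req);		/* old behaviour */

	/* ... dreq is hashed so cache_revisit_request() can find it ... */

	if (dreq == &sleeper.handle) {
		wait_for_completion_interruptible_timeout(
			&sleeper.completion, req->thread_wait);
		if (test_bit(CACHE_PENDING, &item->flags))
			/* wait was too short; fall back to old deferral */
			dreq = req->defer(req);
		else
			return -EEXIST;	/* answer arrived while we slept */
	}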


Signed-off-by: NeilBrown <neilb@suse.de>
---
 include/linux/sunrpc/cache.h |    3 +++
 net/sunrpc/cache.c           |   47 +++++++++++++++++++++++++++++++++++++++++-
 net/sunrpc/svc_xprt.c        |   11 ++++++++++
 3 files changed, 60 insertions(+), 1 deletions(-)

diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 7bf3e84..61f521f 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -125,6 +125,9 @@ struct cache_detail {
  */
 struct cache_req {
 	struct cache_deferred_req *(*defer)(struct cache_req *req);
+	int thread_wait;  /* How long (jiffies) we can block the
+			   * current thread to wait for updates.
+			   */
 };
 /* this must be embedded in a deferred_request that is being
  * delayed awaiting cache-fill
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2b06410..2fdd66b 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -509,10 +509,22 @@ static LIST_HEAD(cache_defer_list);
 static struct list_head cache_defer_hash[DFR_HASHSIZE];
 static int cache_defer_cnt;
 
+struct thread_deferred_req {
+	struct cache_deferred_req handle;
+	struct completion completion;
+};
+static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
+{
+	struct thread_deferred_req *dr =
+		container_of(dreq, struct thread_deferred_req, handle);
+	complete(&dr->completion);
+}
+
 static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 {
 	struct cache_deferred_req *dreq, *discard;
 	int hash = DFR_HASH(item);
+	struct thread_deferred_req sleeper;
 
 	if (cache_defer_cnt >= DFR_MAX) {
 		/* too much in the cache, randomly drop this one,
@@ -521,7 +533,15 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 		if (net_random()&1)
 			return -ENOMEM;
 	}
-	dreq = req->defer(req);
+	if (req->thread_wait) {
+		dreq = &sleeper.handle;
+		sleeper.completion =
+			COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
+		dreq->revisit = cache_restart_thread;
+	} else
+		dreq = req->defer(req);
+
+ retry:
 	if (dreq == NULL)
 		return -ENOMEM;
 
@@ -555,6 +575,31 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 		cache_revisit_request(item);
 		return -EAGAIN;
 	}
+
+	if (dreq == &sleeper.handle) {
+		wait_for_completion_interruptible_timeout(
+			&sleeper.completion, req->thread_wait);
+		spin_lock(&cache_defer_lock);
+		if (!list_empty(&sleeper.handle.hash)) {
+			list_del_init(&sleeper.handle.recent);
+			list_del_init(&sleeper.handle.hash);
+			cache_defer_cnt--;
+		}
+		spin_unlock(&cache_defer_lock);
+		if (test_bit(CACHE_PENDING, &item->flags)) {
+			/* item is still pending, try request
+			 * deferral
+			 */
+			dreq = req->defer(req);
+			goto retry;
+		}
+		/* only return success if we actually deferred the
+		 * request.  In this case we waited until it was
+		 * answered so no deferral has happened - rather
+		 * an answer already exists.
+		 */
+		return -EEXIST;
+	}
 	return 0;
 }
 
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index cbc0849..8ff6840 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -651,6 +651,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 	if (signalled() || kthread_should_stop())
 		return -EINTR;
 
+	/* Normally we will wait up to 5 seconds for any required
+	 * cache information to be provided.
+	 */
+	rqstp->rq_chandle.thread_wait = 5*HZ;
+
 	spin_lock_bh(&pool->sp_lock);
 	xprt = svc_xprt_dequeue(pool);
 	if (xprt) {
@@ -658,6 +663,12 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		svc_xprt_get(xprt);
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+
+		/* As there is a shortage of threads and this request
+		 * had to be queue, don't allow the thread to wait so
+		 * long for cache updates.
+		 */
+		rqstp->rq_chandle.thread_wait = 1*HZ;
 	} else {
 		/* No data pending. Go to sleep */
 		svc_thread_enqueue(pool, rqstp);




* [PATCH 5/6] svcauth_gss: replace a trivial 'switch' with an 'if'
  2010-08-12  7:04 [PATCH 0/6] Cache deferral improvements - try N+2 NeilBrown
                   ` (2 preceding siblings ...)
  2010-08-12  7:04 ` [PATCH 3/6] sunrpc: close connection when a request is irretrievably lost NeilBrown
@ 2010-08-12  7:04 ` NeilBrown
  2010-08-12  7:04 ` [PATCH 4/6] nfsd: disable deferral for NFSv4 NeilBrown
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 19+ messages in thread
From: NeilBrown @ 2010-08-12  7:04 UTC (permalink / raw)
  To: J. Bruce Fields; +Cc: linux-nfs

Code like:

  switch(xxx) {
  case -error1:
  case -error2:
     ..
     return;
  case 0:
     stuff;
  }

can more naturally be written:

  if (xxx < 0)
      return;

  stuff;

Signed-off-by: NeilBrown <neilb@suse.de>
---
 net/sunrpc/auth_gss/svcauth_gss.c |   41 +++++++++++++++++--------------------
 1 files changed, 19 insertions(+), 22 deletions(-)

diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index ed005af..dec2a6f 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1034,30 +1034,27 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
 	rsi_free(&rsikey);
 	if (!rsip)
 		return SVC_CLOSE;
-	switch (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) {
-	case -EAGAIN:
-	case -ETIMEDOUT:
-	case -ENOENT:
+	if (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
 		/* No upcall result: */
 		return SVC_CLOSE;
-	case 0:
-		ret = SVC_CLOSE;
-		/* Got an answer to the upcall; use it: */
-		if (gss_write_init_verf(rqstp, rsip))
-			goto out;
-		if (resv->iov_len + 4 > PAGE_SIZE)
-			goto out;
-		svc_putnl(resv, RPC_SUCCESS);
-		if (svc_safe_putnetobj(resv, &rsip->out_handle))
-			goto out;
-		if (resv->iov_len + 3 * 4 > PAGE_SIZE)
-			goto out;
-		svc_putnl(resv, rsip->major_status);
-		svc_putnl(resv, rsip->minor_status);
-		svc_putnl(resv, GSS_SEQ_WIN);
-		if (svc_safe_putnetobj(resv, &rsip->out_token))
-			goto out;
-	}
+
+	ret = SVC_CLOSE;
+	/* Got an answer to the upcall; use it: */
+	if (gss_write_init_verf(rqstp, rsip))
+		goto out;
+	if (resv->iov_len + 4 > PAGE_SIZE)
+		goto out;
+	svc_putnl(resv, RPC_SUCCESS);
+	if (svc_safe_putnetobj(resv, &rsip->out_handle))
+		goto out;
+	if (resv->iov_len + 3 * 4 > PAGE_SIZE)
+		goto out;
+	svc_putnl(resv, rsip->major_status);
+	svc_putnl(resv, rsip->minor_status);
+	svc_putnl(resv, GSS_SEQ_WIN);
+	if (svc_safe_putnetobj(resv, &rsip->out_token))
+		goto out;
+
 	ret = SVC_COMPLETE;
 out:
 	cache_put(&rsip->h, &rsi_cache);




* [PATCH 4/6] nfsd: disable deferral for NFSv4
  2010-08-12  7:04 [PATCH 0/6] Cache deferral improvements - try N+2 NeilBrown
                   ` (3 preceding siblings ...)
  2010-08-12  7:04 ` [PATCH 5/6] svcauth_gss: replace a trivial 'switch' with an 'if' NeilBrown
@ 2010-08-12  7:04 ` NeilBrown
       [not found]   ` <20100812070407.11459.2929.stgit-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
  2010-08-12  7:04 ` [PATCH 6/6] sunrpc/cache: change deferred-request hash table to use hlist NeilBrown
                   ` (2 subsequent siblings)
  7 siblings, 1 reply; 19+ messages in thread
From: NeilBrown @ 2010-08-12  7:04 UTC (permalink / raw)
  To: J. Bruce Fields; +Cc: linux-nfs

Now that a slight delay in getting a reply to an upcall doesn't
require deferring of requests, disable request deferral for all NFSv4
requests - the concept doesn't really fit with the v4 model.

Signed-off-by: NeilBrown <neilb@suse.de>
---
 fs/nfsd/nfs4proc.c |    4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 59ec449..fca3621 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1031,8 +1031,8 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
 	resp->cstate.session = NULL;
 	fh_init(&resp->cstate.current_fh, NFS4_FHSIZE);
 	fh_init(&resp->cstate.save_fh, NFS4_FHSIZE);
-	/* Use the deferral mechanism only for NFSv4.0 compounds */
-	rqstp->rq_usedeferral = (args->minorversion == 0);
+	/* Don't use the deferral mechanism for NFSv4. */
+	rqstp->rq_usedeferral = 0;
 
 	/*
 	 * According to RFC3010, this takes precedence over all other errors.




* [PATCH 3/6] sunrpc: close connection when a request is irretrievably lost.
  2010-08-12  7:04 [PATCH 0/6] Cache deferral improvements - try N+2 NeilBrown
  2010-08-12  7:04 ` [PATCH 1/6] sunrpc/cache: allow threads to block while waiting for cache update NeilBrown
  2010-08-12  7:04 ` [PATCH 2/6] nfsd/idmap: drop special request deferral in favour of improved default NeilBrown
@ 2010-08-12  7:04 ` NeilBrown
  2010-09-21 20:53   ` J. Bruce Fields
  2010-08-12  7:04 ` [PATCH 5/6] svcauth_gss: replace a trivial 'switch' with an 'if' NeilBrown
                   ` (4 subsequent siblings)
  7 siblings, 1 reply; 19+ messages in thread
From: NeilBrown @ 2010-08-12  7:04 UTC (permalink / raw)
  To: J. Bruce Fields; +Cc: linux-nfs

If we drop a request in the sunrpc layer, either due to kmalloc failure,
or due to a cache miss when we could not queue the request for later
replay, then close the connection to encourage the client to retry sooner.

Note that if the drop happens in the NFS layer, NFSERR_JUKEBOX
(aka NFS4ERR_DELAY) is returned to guide the client concerning
replay.

Signed-off-by: NeilBrown <neilb@suse.de>
---
 include/linux/sunrpc/svcauth.h    |   10 +++++++---
 net/sunrpc/auth_gss/svcauth_gss.c |   12 ++++++------
 net/sunrpc/svc.c                  |    3 +++
 net/sunrpc/svcauth_unix.c         |   11 ++++++++---
 4 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index d39dbdc..1126693 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -108,9 +108,13 @@ struct auth_ops {
 #define	SVC_NEGATIVE	4
 #define	SVC_OK		5
 #define	SVC_DROP	6
-#define	SVC_DENIED	7
-#define	SVC_PENDING	8
-#define	SVC_COMPLETE	9
+#define	SVC_CLOSE	7	/* Like SVC_DROP, but request is definitely
+				 * lost so if there is a tcp connection, it
+				 * should be closed
+				 */
+#define	SVC_DENIED	8
+#define	SVC_PENDING	9
+#define	SVC_COMPLETE	10
 
 
 extern int	svc_authenticate(struct svc_rqst *rqstp, __be32 *authp);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index cc385b3..ed005af 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -964,7 +964,7 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
 	if (rqstp->rq_gssclient == NULL)
 		return SVC_DENIED;
 	stat = svcauth_unix_set_client(rqstp);
-	if (stat == SVC_DROP)
+	if (stat == SVC_DROP || stat == SVC_CLOSE)
 		return stat;
 	return SVC_OK;
 }
@@ -1018,7 +1018,7 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
 		return SVC_DENIED;
 	memset(&rsikey, 0, sizeof(rsikey));
 	if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
-		return SVC_DROP;
+		return SVC_CLOSE;
 	*authp = rpc_autherr_badverf;
 	if (svc_safe_getnetobj(argv, &tmpobj)) {
 		kfree(rsikey.in_handle.data);
@@ -1026,22 +1026,22 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
 	}
 	if (dup_netobj(&rsikey.in_token, &tmpobj)) {
 		kfree(rsikey.in_handle.data);
-		return SVC_DROP;
+		return SVC_CLOSE;
 	}
 
 	/* Perform upcall, or find upcall result: */
 	rsip = rsi_lookup(&rsikey);
 	rsi_free(&rsikey);
 	if (!rsip)
-		return SVC_DROP;
+		return SVC_CLOSE;
 	switch (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) {
 	case -EAGAIN:
 	case -ETIMEDOUT:
 	case -ENOENT:
 		/* No upcall result: */
-		return SVC_DROP;
+		return SVC_CLOSE;
 	case 0:
-		ret = SVC_DROP;
+		ret = SVC_CLOSE;
 		/* Got an answer to the upcall; use it: */
 		if (gss_write_init_verf(rqstp, rsip))
 			goto out;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index d9017d6..6359c42 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1055,6 +1055,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 		goto err_bad;
 	case SVC_DENIED:
 		goto err_bad_auth;
+	case SVC_CLOSE:
+		if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
+			svc_close_xprt(rqstp->rq_xprt);
 	case SVC_DROP:
 		goto dropit;
 	case SVC_COMPLETE:
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 2073116..e91b550 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -674,6 +674,8 @@ static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
 	switch (ret) {
 	case -ENOENT:
 		return ERR_PTR(-ENOENT);
+	case -ETIMEDOUT:
+		return ERR_PTR(-ESHUTDOWN);
 	case 0:
 		gi = get_group_info(ug->gi);
 		cache_put(&ug->h, &unix_gid_cache);
@@ -720,8 +722,9 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
 	switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
 		default:
 			BUG();
-		case -EAGAIN:
 		case -ETIMEDOUT:
+			return SVC_CLOSE;
+		case -EAGAIN:
 			return SVC_DROP;
 		case -ENOENT:
 			return SVC_DENIED;
@@ -736,6 +739,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
 	switch (PTR_ERR(gi)) {
 	case -EAGAIN:
 		return SVC_DROP;
+	case -ESHUTDOWN:
+		return SVC_CLOSE;
 	case -ENOENT:
 		break;
 	default:
@@ -776,7 +781,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
 	cred->cr_gid = (gid_t) -1;
 	cred->cr_group_info = groups_alloc(0);
 	if (cred->cr_group_info == NULL)
-		return SVC_DROP; /* kmalloc failure - client must retry */
+		return SVC_CLOSE; /* kmalloc failure - client must retry */
 
 	/* Put NULL verifier */
 	svc_putnl(resv, RPC_AUTH_NULL);
@@ -840,7 +845,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
 		goto badcred;
 	cred->cr_group_info = groups_alloc(slen);
 	if (cred->cr_group_info == NULL)
-		return SVC_DROP;
+		return SVC_CLOSE;
 	for (i = 0; i < slen; i++)
 		GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
 	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {




* [PATCH 6/6] sunrpc/cache: change deferred-request hash table to use hlist.
  2010-08-12  7:04 [PATCH 0/6] Cache deferral improvements - try N+2 NeilBrown
                   ` (4 preceding siblings ...)
  2010-08-12  7:04 ` [PATCH 4/6] nfsd: disable deferral for NFSv4 NeilBrown
@ 2010-08-12  7:04 ` NeilBrown
       [not found] ` <20100812065722.11459.18978.stgit-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
  2010-08-17  5:15 ` [PATCH 1.5/6] Fix race in new request delay code NeilBrown
  7 siblings, 0 replies; 19+ messages in thread
From: NeilBrown @ 2010-08-12  7:04 UTC (permalink / raw)
  To: J. Bruce Fields; +Cc: linux-nfs

Being a hash table, hlist is the best option.

There is currently some ugliness where we treat "->next == NULL" as
a special case to avoid having to initialise the whole array.
This change nicely gets rid of that case.
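
The special case exists because a zero-filled struct list_head is not
a valid empty list (both pointers must point back at the head), while
a zero-filled struct hlist_head is: its single 'first' pointer is
simply NULL.  So the lazily-initialised insertion

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

becomes just

	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);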

Signed-off-by: NeilBrown <neilb@suse.de>
---
 include/linux/sunrpc/cache.h |    2 +-
 net/sunrpc/cache.c           |   28 +++++++++++-----------------
 2 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 61f521f..5782154 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -133,7 +133,7 @@ struct cache_req {
  * delayed awaiting cache-fill
  */
 struct cache_deferred_req {
-	struct list_head	hash;	/* on hash chain */
+	struct hlist_node	hash;	/* on hash chain */
 	struct list_head	recent; /* on fifo */
 	struct cache_head	*item;  /* cache item we wait on */
 	void			*owner; /* we might need to discard all defered requests
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2fdd66b..551ed44 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -506,7 +506,7 @@ EXPORT_SYMBOL_GPL(cache_purge);
 
 static DEFINE_SPINLOCK(cache_defer_lock);
 static LIST_HEAD(cache_defer_list);
-static struct list_head cache_defer_hash[DFR_HASHSIZE];
+static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
 static int cache_defer_cnt;
 
 struct thread_deferred_req {
@@ -551,9 +551,7 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 
 	list_add(&dreq->recent, &cache_defer_list);
 
-	if (cache_defer_hash[hash].next == NULL)
-		INIT_LIST_HEAD(&cache_defer_hash[hash]);
-	list_add(&dreq->hash, &cache_defer_hash[hash]);
+	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
 
 	/* it is in, now maybe clean up */
 	discard = NULL;
@@ -561,7 +559,7 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 		discard = list_entry(cache_defer_list.prev,
 				     struct cache_deferred_req, recent);
 		list_del_init(&discard->recent);
-		list_del_init(&discard->hash);
+		hlist_del_init(&discard->hash);
 		cache_defer_cnt--;
 	}
 	spin_unlock(&cache_defer_lock);
@@ -580,9 +578,9 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 		wait_for_completion_interruptible_timeout(
 			&sleeper.completion, req->thread_wait);
 		spin_lock(&cache_defer_lock);
-		if (!list_empty(&sleeper.handle.hash)) {
+		if (!hlist_unhashed(&sleeper.handle.hash)) {
 			list_del_init(&sleeper.handle.recent);
-			list_del_init(&sleeper.handle.hash);
+			hlist_del_init(&sleeper.handle.hash);
 			cache_defer_cnt--;
 		}
 		spin_unlock(&cache_defer_lock);
@@ -608,24 +606,20 @@ static void cache_revisit_request(struct cache_head *item)
 	struct cache_deferred_req *dreq;
 	struct list_head pending;
 
-	struct list_head *lp;
+	struct hlist_node *lp, *tmp;
 	int hash = DFR_HASH(item);
 
 	INIT_LIST_HEAD(&pending);
 	spin_lock(&cache_defer_lock);
 
-	lp = cache_defer_hash[hash].next;
-	if (lp) {
-		while (lp != &cache_defer_hash[hash]) {
-			dreq = list_entry(lp, struct cache_deferred_req, hash);
-			lp = lp->next;
+	hlist_for_each_entry_safe(dreq, lp, tmp,
+				  &cache_defer_hash[hash], hash)
 			if (dreq->item == item) {
-				list_del_init(&dreq->hash);
+				hlist_del_init(&dreq->hash);
 				list_move(&dreq->recent, &pending);
 				cache_defer_cnt--;
 			}
-		}
-	}
+
 	spin_unlock(&cache_defer_lock);
 
 	while (!list_empty(&pending)) {
@@ -646,7 +640,7 @@ void cache_clean_deferred(void *owner)
 
 	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
 		if (dreq->owner == owner) {
-			list_del_init(&dreq->hash);
+			hlist_del_init(&dreq->hash);
 			list_move(&dreq->recent, &pending);
 			cache_defer_cnt--;
 		}




* Re: [PATCH 0/6] Cache deferral improvements - try N+2
       [not found] ` <20100812065722.11459.18978.stgit-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2010-08-12 12:03   ` J. Bruce Fields
  0 siblings, 0 replies; 19+ messages in thread
From: J. Bruce Fields @ 2010-08-12 12:03 UTC (permalink / raw)
  To: NeilBrown; +Cc: linux-nfs

On Thu, Aug 12, 2010 at 05:04:05PM +1000, NeilBrown wrote:
> Note that there is a conflict between this patch set and the other
> one.  The other changes something in nfs4idmap.c, and this one removes
> all the code that was changed.  Very easy to resolve.

Thanks, Neil!

I'll go through both of these series next week after I've gotten back
from Linuxcon.

--b.


* [PATCH 1.5/6] Fix race in new request delay code.
  2010-08-12  7:04 [PATCH 0/6] Cache deferral improvements - try N+2 NeilBrown
                   ` (6 preceding siblings ...)
       [not found] ` <20100812065722.11459.18978.stgit-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2010-08-17  5:15 ` NeilBrown
  2010-08-26 21:08   ` J. Bruce Fields
  7 siblings, 1 reply; 19+ messages in thread
From: NeilBrown @ 2010-08-17  5:15 UTC (permalink / raw)
  To: J. Bruce Fields; +Cc: linux-nfs, Anton Blanchard

If the 'wait_for_completion_interruptible_timeout' completes due
to interrupt or timeout, just before cache_revisit_request gets around
to calling ->revisit and thus the completion, we have a race with bad
consequences.

cache_defer_req will see that sleeper.handle.hash is empty (because
cache_revisit_request has already done the list_del_init), and that
CACHE_PENDING is clear, and so will exit that function leaving any
local variables such as 'sleeper' undefined.

Meanwhile cache_revisit_request could delete sleeper.handle.recent
from the 'pending' list, and call sleeper.handle.revisit, any of which
could have new garbage values and so could trigger a BUG.
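
Roughly, the interleaving is (thread labels illustrative):

	waiting thread                      cache_revisit_request()
	--------------                      -----------------------
	wait_for_completion_..._timeout()
	  returns (interrupt/timeout)
	                                    list_del_init(&sleeper.handle.hash)
	sees hash empty and CACHE_PENDING
	  clear; returns, so 'sleeper'
	  (on the stack) becomes garbage
	                                    list_move(&sleeper.handle.recent,
	                                              &pending)
	                                    sleeper.handle.revisit()  <- BUG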

Reported-by: Anton Blanchard <anton@au1.ibm.com>
Signed-off-by: NeilBrown <neilb@suse.de>
---

Hi Bruce,

This should probably be merged into the patch 1/6 of the earlier
series.

I'm not certain this actually fixed what Anton reported (against
SLES), but there is a fair chance and I only found it because of the
report.

Thanks,
NeilBrown



 net/sunrpc/cache.c |   28 ++++++++++++++++++++--------
 1 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2fdd66b..ee4f799 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -577,15 +577,27 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 	}
 
 	if (dreq == &sleeper.handle) {
-		wait_for_completion_interruptible_timeout(
-			&sleeper.completion, req->thread_wait);
-		spin_lock(&cache_defer_lock);
-		if (!list_empty(&sleeper.handle.hash)) {
-			list_del_init(&sleeper.handle.recent);
-			list_del_init(&sleeper.handle.hash);
-			cache_defer_cnt--;
+		if (wait_for_completion_interruptible_timeout(
+			    &sleeper.completion, req->thread_wait) <= 0) {
+			/* The completion wasn't completed, so we need
+			 * to clean up
+			 */
+			spin_lock(&cache_defer_lock);
+			if (!list_empty(&sleeper.handle.hash)) {
+				list_del_init(&sleeper.handle.recent);
+				list_del_init(&sleeper.handle.hash);
+				cache_defer_cnt--;
+				spin_unlock(&cache_defer_lock);
+			} else {
+				/* cache_revisit_request already removed
+				 * this from the hash table, but hasn't
+				 * called ->revisit yet.  It will very soon
+				 * and we need to wait for it.
+				 */
+				spin_unlock(&cache_defer_lock);
+				wait_for_completion(&sleeper.completion);
+			}
 		}
-		spin_unlock(&cache_defer_lock);
 		if (test_bit(CACHE_PENDING, &item->flags)) {
 			/* item is still pending, try request
 			 * deferral




* Re: [PATCH 1.5/6] Fix race in new request delay code.
  2010-08-17  5:15 ` [PATCH 1.5/6] Fix race in new request delay code NeilBrown
@ 2010-08-26 21:08   ` J. Bruce Fields
  2010-08-29 23:36     ` Neil Brown
  0 siblings, 1 reply; 19+ messages in thread
From: J. Bruce Fields @ 2010-08-26 21:08 UTC (permalink / raw)
  To: NeilBrown; +Cc: linux-nfs, Anton Blanchard

On Tue, Aug 17, 2010 at 03:15:09PM +1000, NeilBrown wrote:
> If the 'wait_for_completion_interruptible_timeout' completes due
> to interrupt or timeout, just before cache_revisit_request gets around
> to calling ->revisit and thus the completion, we have a race with bad
> consequences.
> 
> cache_defer_req will see that sleeper.handle.hash is empty (because
> cache_revisit_request has already done the list_del_init), and that
> CACHE_PENDING is clear, and so will exit that function leaving any
> local variables such as 'sleeper' undefined.
> 
> Meanwhile cache_revisit_request could delete sleeper.handle.recent
> from the 'pending' list, and call sleeper.handle.revisit, any of which
> could have new garbage values and so could trigger a BUG.

Thanks, this looks correct to me!

I wish it were simpler, but don't see how right now.

Some superficial cleanup might help me at least--see below (untested),
which attempts to make obvious the first-try-sleeping-then-try-deferral
logic, by turning cache_defer_req into basically:

	if (req->thread_wait) {
		ret = cache_wait_req(req, item);
		if (ret != -ETIMEDOUT)
			return ret;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return -ENOMEM;
...

I also wonder whether there would be a way to make most of this just
another ->defer() method, so that we wouldn't need to switch on
req->thread_wait here at all.  But maybe there's not a nice way to do
that.  (Certainly I prefer your approach to what the idmap code was
doing.)
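
One hypothetical shape for that idea (rq_sleeper and svc_defer_copy
are made-up names, not from any posted patch): have the transport's
->defer() hand back a sleeper embedded in the svc_rqst whenever an
in-thread wait is allowed:

	static struct cache_deferred_req *svc_defer(struct cache_req *req)
	{
		struct svc_rqst *rqstp =
			container_of(req, struct svc_rqst, rq_chandle);

		if (req->thread_wait) {
			/* hypothetical field embedded in svc_rqst */
			init_completion(&rqstp->rq_sleeper.completion);
			rqstp->rq_sleeper.handle.revisit =
				cache_restart_thread;
			return &rqstp->rq_sleeper.handle;
		}
		return svc_defer_copy(req);	/* existing copying path */
	}

The caller would still need to know whether to block on the
completion afterwards, though, so the switch on thread_wait would
creep back in somewhere.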

--b.

diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2c5297f..da872f9 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -509,60 +509,39 @@ static LIST_HEAD(cache_defer_list);
 static struct list_head cache_defer_hash[DFR_HASHSIZE];
 static int cache_defer_cnt;
 
-struct thread_deferred_req {
-	struct cache_deferred_req handle;
-	struct completion completion;
-};
-static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
+static void __unhash_deferred_req(struct cache_deferred_req *dreq)
 {
-	struct thread_deferred_req *dr =
-		container_of(dreq, struct thread_deferred_req, handle);
-	complete(&dr->completion);
+	list_del_init(&dreq->recent);
+	list_del_init(&dreq->hash);
+	cache_defer_cnt--;
 }
 
-static int cache_defer_req(struct cache_req *req, struct cache_head *item)
+static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
 {
-	struct cache_deferred_req *dreq, *discard;
 	int hash = DFR_HASH(item);
-	struct thread_deferred_req sleeper;
 
-	if (cache_defer_cnt >= DFR_MAX) {
-		/* too much in the cache, randomly drop this one,
-		 * or continue and drop the oldest below
-		 */
-		if (net_random()&1)
-			return -ENOMEM;
-	}
-	if (req->thread_wait) {
-		dreq = &sleeper.handle;
-		sleeper.completion =
-			COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
-		dreq->revisit = cache_restart_thread;
-	} else
-		dreq = req->defer(req);
+	list_add(&dreq->recent, &cache_defer_list);
+	if (cache_defer_hash[hash].next == NULL)
+		INIT_LIST_HEAD(&cache_defer_hash[hash]);
+	list_add(&dreq->hash, &cache_defer_hash[hash]);
+}
 
- retry:
-	if (dreq == NULL)
-		return -ENOMEM;
+static int setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item)
+{
+	struct cache_deferred_req *discard;
 
 	dreq->item = item;
 
 	spin_lock(&cache_defer_lock);
 
-	list_add(&dreq->recent, &cache_defer_list);
-
-	if (cache_defer_hash[hash].next == NULL)
-		INIT_LIST_HEAD(&cache_defer_hash[hash]);
-	list_add(&dreq->hash, &cache_defer_hash[hash]);
+	__hash_deferred_req(dreq, item);
 
 	/* it is in, now maybe clean up */
 	discard = NULL;
 	if (++cache_defer_cnt > DFR_MAX) {
 		discard = list_entry(cache_defer_list.prev,
 				     struct cache_deferred_req, recent);
-		list_del_init(&discard->recent);
-		list_del_init(&discard->hash);
-		cache_defer_cnt--;
+		__unhash_deferred_req(discard);
 	}
 	spin_unlock(&cache_defer_lock);
 
@@ -575,44 +554,88 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 		cache_revisit_request(item);
 		return -EAGAIN;
 	}
+	return 0;
+}
 
-	if (dreq == &sleeper.handle) {
-		if (wait_for_completion_interruptible_timeout(
-			    &sleeper.completion, req->thread_wait) <= 0) {
-			/* The completion wasn't completed, so we need
-			 * to clean up
-			 */
-			spin_lock(&cache_defer_lock);
-			if (!list_empty(&sleeper.handle.hash)) {
-				list_del_init(&sleeper.handle.recent);
-				list_del_init(&sleeper.handle.hash);
-				cache_defer_cnt--;
-				spin_unlock(&cache_defer_lock);
-			} else {
-				/* cache_revisit_request already removed
-				 * this from the hash table, but hasn't
-				 * called ->revisit yet.  It will very soon
-				 * and we need to wait for it.
-				 */
-				spin_unlock(&cache_defer_lock);
-				wait_for_completion(&sleeper.completion);
-			}
-		}
-		if (test_bit(CACHE_PENDING, &item->flags)) {
-			/* item is still pending, try request
-			 * deferral
+struct thread_deferred_req {
+	struct cache_deferred_req handle;
+	struct completion completion;
+};
+
+static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
+{
+	struct thread_deferred_req *dr =
+		container_of(dreq, struct thread_deferred_req, handle);
+	complete(&dr->completion);
+}
+
+static int cache_wait_req(struct cache_req *req, struct cache_head *item)
+{
+	struct thread_deferred_req sleeper;
+	struct cache_deferred_req *dreq = &sleeper.handle;
+	int ret;
+
+	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
+	dreq->revisit = cache_restart_thread;
+
+	ret = setup_deferral(dreq, item);
+	if (ret)
+		return ret;
+
+	if (wait_for_completion_interruptible_timeout(
+		    &sleeper.completion, req->thread_wait) <= 0) {
+		/* The completion wasn't completed, so we need
+		 * to clean up
+		 */
+		spin_lock(&cache_defer_lock);
+		if (!list_empty(&sleeper.handle.hash)) {
+			__unhash_deferred_req(&sleeper.handle);
+			spin_unlock(&cache_defer_lock);
+		} else {
+			/* cache_revisit_request already removed
+			 * this from the hash table, but hasn't
+			 * called ->revisit yet.  It will very soon
+			 * and we need to wait for it.
 			 */
-			dreq = req->defer(req);
-			goto retry;
+			spin_unlock(&cache_defer_lock);
+			wait_for_completion(&sleeper.completion);
 		}
-		/* only return success if we actually deferred the
-		 * request.  In this case we waited until it was
-		 * answered so no deferral has happened - rather
-		 * an answer already exists.
+	}
+	if (test_bit(CACHE_PENDING, &item->flags)) {
+		/* item is still pending, try request
+		 * deferral
 		 */
-		return -EEXIST;
+		return -ETIMEDOUT;
 	}
-	return 0;
+	/* only return success if we actually deferred the
+	 * request.  In this case we waited until it was
+	 * answered so no deferral has happened - rather
+	 * an answer already exists.
+	 */
+	return -EEXIST;
+}
+
+static int cache_defer_req(struct cache_req *req, struct cache_head *item)
+{
+	struct cache_deferred_req *dreq;
+	int ret;
+
+	if (cache_defer_cnt >= DFR_MAX) {
+		/* too much in the cache, randomly drop this one,
+		 * or continue and drop the oldest
+		 */
+		if (net_random()&1)
+			return -ENOMEM;
+	}
+	if (req->thread_wait) {
+		ret = cache_wait_req(req, item);
+		if (ret != -ETIMEDOUT)
+			return ret;
+	}
+	dreq = req->defer(req);
+	if (dreq == NULL)
+		return -ENOMEM;
+	return setup_deferral(dreq, item);
 }
 
 static void cache_revisit_request(struct cache_head *item)
@@ -632,9 +655,8 @@ static void cache_revisit_request(struct cache_head *item)
 			dreq = list_entry(lp, struct cache_deferred_req, hash);
 			lp = lp->next;
 			if (dreq->item == item) {
-				list_del_init(&dreq->hash);
-				list_move(&dreq->recent, &pending);
-				cache_defer_cnt--;
+				__unhash_deferred_req(dreq);
+				list_add(&dreq->recent, &pending);
 			}
 		}
 	}
@@ -657,11 +679,8 @@ void cache_clean_deferred(void *owner)
 	spin_lock(&cache_defer_lock);
 
 	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
-		if (dreq->owner == owner) {
-			list_del_init(&dreq->hash);
-			list_move(&dreq->recent, &pending);
-			cache_defer_cnt--;
-		}
+		if (dreq->owner == owner)
+			__unhash_deferred_req(dreq);
 	}
 	spin_unlock(&cache_defer_lock);
 
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 8ff6840..95fc3e8 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -665,7 +665,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 
 		/* As there is a shortage of threads and this request
-		 * had to be queue, don't allow the thread to wait so
+		 * had to be queued, don't allow the thread to wait so
 		 * long for cache updates.
 		 */
 		rqstp->rq_chandle.thread_wait = 1*HZ;


* Re: [PATCH 1.5/6] Fix race in new request delay code.
  2010-08-26 21:08   ` J. Bruce Fields
@ 2010-08-29 23:36     ` Neil Brown
  2010-09-01 11:31       ` J. Bruce Fields
  2010-09-21  8:35       ` Neil Brown
  0 siblings, 2 replies; 19+ messages in thread
From: Neil Brown @ 2010-08-29 23:36 UTC (permalink / raw)
  To: J. Bruce Fields; +Cc: linux-nfs, Anton Blanchard

On Thu, 26 Aug 2010 17:08:00 -0400
"J. Bruce Fields" <bfields@fieldses.org> wrote:

> On Tue, Aug 17, 2010 at 03:15:09PM +1000, NeilBrown wrote:
> > If the 'wait_for_completion_interruptible_timeout' completes due
> > to interrupt or timeout, just before cache_revisit_request gets around
> > to calling ->revisit and thus the completion, we have a race with bad
> > consequences.
> > 
> > cache_defer_req will see that sleeper.handle.hash is empty (because
> > cache_revisit_request has already done the list_del_init), and that
> > CACHE_PENDING is clear, and so will exit that function leaving any
> > local variables such as 'sleeper' undefined.
> > 
> > Meanwhile cache_revisit_request could delete sleeper.handle.recent
> > from the 'pending' list, and call sleeper.hande.revisit, any of which
> > could have new garbage values and so could trigger a BUG.
> 
> Thanks, this looks correct to me!
> 
> I wish it were simpler, but don't see how right now.

I think the way to make it simpler is to get rid of the deferral altogether.
The more I think about it the less I like it :-)

The deferral code effectively gives you DFR_MAX extra virtual threads.  They
each are processing one request but have (supposedly) much lower overhead
than creating real threads.  So there are two groups of threads:
 - those that can wait a longish time for a reply to an upcall
 - those that only wait for IO.

This distinction could well still be useful as we don't really want all
threads to be tied up waiting on upcalls so that no real work can be done.
However we don't really need the deferral mechanism to achieve this.

Instead we impose a limit on the number of threads that can be waiting on
a completion (in cache_wait_req in your revised code).
If the number of such threads ever reaches - say - half the total, then we
start dropping requests and closing TCP connections (or start new threads if
we ever work out a good way to dynamically manage the thread pool).
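
Something like this, perhaps (a hypothetical sketch; cache_may_wait()
and the sv_nrthreads/2 threshold are made up for illustration):

	static atomic_t cache_waiters = ATOMIC_INIT(0);

	/* Call before sleeping in cache_wait_req(); the caller must
	 * atomic_dec(&cache_waiters) once the wait is over.
	 */
	static bool cache_may_wait(struct svc_serv *serv)
	{
		if (atomic_inc_return(&cache_waiters) >
		    serv->sv_nrthreads / 2) {
			atomic_dec(&cache_waiters);
			/* drop the request and close the connection */
			return false;
		}
		return true;
	}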

So I would suggest:
  - putting in a counter and limiting the number of waiting threads to
    half the pool size
  - removing all the deferral code
  - looking at cleaning up what is left to make it more readable.

???

NeilBrown


> 
> Some superficial cleanup might help me at least--see below (untested),
> which attempts to make obvious the first-try-sleeping-then-try-deferral
> logic, by turning cache_defer_req into basically:
> 
> 	if (req->thread_wait) {
> 		ret = cache_wait_req(req, item);
> 		if (ret != -ETIMEDOUT)
> 			return ret;
> 	}
> 	dreq = req->defer(req);
> 	if (dreq == NULL)
> 		return -ENOMEM;
> ...
> 
> I also wonder whether there would be a way to make most of this just
> another ->defer() method, so that we wouldn't need to switch on
> req->thread_wait here at all.  But maybe there's not a nice way to do
> that.  (Certainly I prefer your approach to what the idmap code was
> doing.)
> 
> --b.
> 
> diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
> index 2c5297f..da872f9 100644
> --- a/net/sunrpc/cache.c
> +++ b/net/sunrpc/cache.c
> @@ -509,60 +509,39 @@ static LIST_HEAD(cache_defer_list);
>  static struct list_head cache_defer_hash[DFR_HASHSIZE];
>  static int cache_defer_cnt;
>  
> -struct thread_deferred_req {
> -	struct cache_deferred_req handle;
> -	struct completion completion;
> -};
> -static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
> +static void __unhash_deferred_req(struct cache_deferred_req *dreq)
>  {
> -	struct thread_deferred_req *dr =
> -		container_of(dreq, struct thread_deferred_req, handle);
> -	complete(&dr->completion);
> +	list_del_init(&dreq->recent);
> +	list_del_init(&dreq->hash);
> +	cache_defer_cnt--;
>  }
>  
> -static int cache_defer_req(struct cache_req *req, struct cache_head *item)
> +static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
>  {
> -	struct cache_deferred_req *dreq, *discard;
>  	int hash = DFR_HASH(item);
> -	struct thread_deferred_req sleeper;
>  
> -	if (cache_defer_cnt >= DFR_MAX) {
> -		/* too much in the cache, randomly drop this one,
> -		 * or continue and drop the oldest below
> -		 */
> -		if (net_random()&1)
> -			return -ENOMEM;
> -	}
> -	if (req->thread_wait) {
> -		dreq = &sleeper.handle;
> -		sleeper.completion =
> -			COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
> -		dreq->revisit = cache_restart_thread;
> -	} else
> -		dreq = req->defer(req);
> +	list_add(&dreq->recent, &cache_defer_list);
> +	if (cache_defer_hash[hash].next == NULL)
> +		INIT_LIST_HEAD(&cache_defer_hash[hash]);
> +	list_add(&dreq->hash, &cache_defer_hash[hash]);
> +}
>  
> - retry:
> -	if (dreq == NULL)
> -		return -ENOMEM;
> +static int setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item)
> +{
> +	struct cache_deferred_req *discard;
>  
>  	dreq->item = item;
>  
>  	spin_lock(&cache_defer_lock);
>  
> -	list_add(&dreq->recent, &cache_defer_list);
> -
> -	if (cache_defer_hash[hash].next == NULL)
> -		INIT_LIST_HEAD(&cache_defer_hash[hash]);
> -	list_add(&dreq->hash, &cache_defer_hash[hash]);
> +	__hash_deferred_req(dreq, item);
>  
>  	/* it is in, now maybe clean up */
>  	discard = NULL;
>  	if (++cache_defer_cnt > DFR_MAX) {
>  		discard = list_entry(cache_defer_list.prev,
>  				     struct cache_deferred_req, recent);
> -		list_del_init(&discard->recent);
> -		list_del_init(&discard->hash);
> -		cache_defer_cnt--;
> +		__unhash_deferred_req(discard);
>  	}
>  	spin_unlock(&cache_defer_lock);
>  
> @@ -575,44 +554,88 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
>  		cache_revisit_request(item);
>  		return -EAGAIN;
>  	}
> +	return 0;
> +}
>  
> -	if (dreq == &sleeper.handle) {
> -		if (wait_for_completion_interruptible_timeout(
> -			    &sleeper.completion, req->thread_wait) <= 0) {
> -			/* The completion wasn't completed, so we need
> -			 * to clean up
> -			 */
> -			spin_lock(&cache_defer_lock);
> -			if (!list_empty(&sleeper.handle.hash)) {
> -				list_del_init(&sleeper.handle.recent);
> -				list_del_init(&sleeper.handle.hash);
> -				cache_defer_cnt--;
> -				spin_unlock(&cache_defer_lock);
> -			} else {
> -				/* cache_revisit_request already removed
> -				 * this from the hash table, but hasn't
> -				 * called ->revisit yet.  It will very soon
> -				 * and we need to wait for it.
> -				 */
> -				spin_unlock(&cache_defer_lock);
> -				wait_for_completion(&sleeper.completion);
> -			}
> -		}
> -		if (test_bit(CACHE_PENDING, &item->flags)) {
> -			/* item is still pending, try request
> -			 * deferral
> +struct thread_deferred_req {
> +	struct cache_deferred_req handle;
> +	struct completion completion;
> +};
> +
> +static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
> +{
> +	struct thread_deferred_req *dr =
> +		container_of(dreq, struct thread_deferred_req, handle);
> +	complete(&dr->completion);
> +}
> +
> +static int cache_wait_req(struct cache_req *req, struct cache_head *item)
> +{
> +	struct thread_deferred_req sleeper;
> +	struct cache_deferred_req *dreq = &sleeper.handle;
> +	int ret;
> +
> +	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
> +	dreq->revisit = cache_restart_thread;
> +
> +	ret = setup_deferral(dreq, item);
> +	if (ret)
> +		return ret;
> +
> +	if (wait_for_completion_interruptible_timeout(
> +		    &sleeper.completion, req->thread_wait) <= 0) {
> +		/* The completion wasn't completed, so we need
> +		 * to clean up
> +		 */
> +		spin_lock(&cache_defer_lock);
> +		if (!list_empty(&sleeper.handle.hash)) {
> +			__unhash_deferred_req(&sleeper.handle);
> +			spin_unlock(&cache_defer_lock);
> +		} else {
> +			/* cache_revisit_request already removed
> +			 * this from the hash table, but hasn't
> +			 * called ->revisit yet.  It will very soon
> +			 * and we need to wait for it.
>  			 */
> -			dreq = req->defer(req);
> -			goto retry;
> +			spin_unlock(&cache_defer_lock);
> +			wait_for_completion(&sleeper.completion);
>  		}
> -		/* only return success if we actually deferred the
> -		 * request.  In this case we waited until it was
> -		 * answered so no deferral has happened - rather
> -		 * an answer already exists.
> +	}
> +	if (test_bit(CACHE_PENDING, &item->flags)) {
> +		/* item is still pending, try request
> +		 * deferral
>  		 */
> -		return -EEXIST;
> +		return -ETIMEDOUT;
>  	}
> -	return 0;
> +	/* only return success if we actually deferred the
> +	 * request.  In this case we waited until it was
> +	 * answered so no deferral has happened - rather
> +	 * an answer already exists.
> +	 */
> +	return -EEXIST;
> +}
> +
> +static int cache_defer_req(struct cache_req *req, struct cache_head *item)
> +{
> +	struct cache_deferred_req *dreq;
> +	int ret;
> +
> +	if (cache_defer_cnt >= DFR_MAX) {
> +		/* too much in the cache, randomly drop this one,
> +		 * or continue and drop the oldest
> +		 */
> +		if (net_random()&1)
> +			return -ENOMEM;
> +	}
> +	if (req->thread_wait) {
> +		ret = cache_wait_req(req, item);
> +		if (ret != -ETIMEDOUT)
> +			return ret;
> +	}
> +	dreq = req->defer(req);
> +	if (dreq == NULL)
> +		return -ENOMEM;
> +	return setup_deferral(dreq, item);
>  }
>  
>  static void cache_revisit_request(struct cache_head *item)
> @@ -632,9 +655,8 @@ static void cache_revisit_request(struct cache_head *item)
>  			dreq = list_entry(lp, struct cache_deferred_req, hash);
>  			lp = lp->next;
>  			if (dreq->item == item) {
> -				list_del_init(&dreq->hash);
> -				list_move(&dreq->recent, &pending);
> -				cache_defer_cnt--;
> +				__unhash_deferred_req(dreq);
> +				list_add(&dreq->recent, &pending);
>  			}
>  		}
>  	}
> @@ -657,11 +679,8 @@ void cache_clean_deferred(void *owner)
>  	spin_lock(&cache_defer_lock);
>  
>  	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
> -		if (dreq->owner == owner) {
> -			list_del_init(&dreq->hash);
> -			list_move(&dreq->recent, &pending);
> -			cache_defer_cnt--;
> -		}
> +		if (dreq->owner == owner)
> +			__unhash_deferred_req(dreq);
>  	}
>  	spin_unlock(&cache_defer_lock);
>  
> diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
> index 8ff6840..95fc3e8 100644
> --- a/net/sunrpc/svc_xprt.c
> +++ b/net/sunrpc/svc_xprt.c
> @@ -665,7 +665,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
>  		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
>  
>  		/* As there is a shortage of threads and this request
> -		 * had to be queue, don't allow the thread to wait so
> +		 * had to be queued, don't allow the thread to wait so
>  		 * long for cache updates.
>  		 */
>  		rqstp->rq_chandle.thread_wait = 1*HZ;



* Re: [PATCH 1.5/6] Fix race in new request delay code.
  2010-08-29 23:36     ` Neil Brown
@ 2010-09-01 11:31       ` J. Bruce Fields
  2010-09-21  8:35       ` Neil Brown
  1 sibling, 0 replies; 19+ messages in thread
From: J. Bruce Fields @ 2010-09-01 11:31 UTC (permalink / raw)
  To: Neil Brown; +Cc: linux-nfs, Anton Blanchard

On Mon, Aug 30, 2010 at 09:36:08AM +1000, Neil Brown wrote:
> On Thu, 26 Aug 2010 17:08:00 -0400
> "J. Bruce Fields" <bfields@fieldses.org> wrote:
> 
> > On Tue, Aug 17, 2010 at 03:15:09PM +1000, NeilBrown wrote:
> > > If the 'wait_for_completion_interruptible_timeout' completes due
> > > to interrupt or timeout, just before cache_revisit_request gets around
> > > to calling ->revisit and thus the completion, we have a race with bad
> > > consequences.
> > > 
> > > cache_defer_req will see that sleeper.handle.hash is empty (because
> > > cache_revisit_request has already done the list_del_init), and that
> > > CACHE_PENDING is clear, and so will exit that function leaving any
> > > local variables such as 'sleeper' undefined.
> > > 
> > > Meanwhile cache_revisit_request could delete sleeper.handle.recent
> > > from the 'pending' list, and call sleeper.handle.revisit, any of which
> > > could have new garbage values and so could trigger a BUG.
> > 
> > Thanks, this looks correct to me!
> > 
> > I wish it were simpler, but don't see how right now.
> 
> I think the way to make it simpler is to get rid of the deferral altogether.
> The more I think about it the less I like it :-)
> 
> The deferral code effectively gives you DFR_MAX extra virtual threads.  They
> each are processing one request but have (supposedly) much lower overhead
> than creating real threads.  So there are two groups of threads:
>  - those that can wait a longish time for a reply to an upcall
>  - those that only wait for IO.
> 
> This distinction could well still be useful as we don't really want all
> threads to be tied up waiting on upcalls so that no real work can be done.
> However we don't really need the deferral mechanism to achieve this.
> 
> Instead we impose a limit on the number of threads that can be waiting on
> a completion (in cache_wait_req in your revised code).
> If the number of such threads ever reaches - say - half the total, then we
> start dropping requests and closing TCP connections (or start new threads if
> we ever work out a good way to dynamically manage the thread pool).
> 
> So I would suggest:
>   - putting in a counter and limiting the number of waiting threads to
>     half the pool size
>   - removing all the deferral code
>   - looking at cleaning up what is left to make it more readable.
> 
> ???

That makes sense to me.  We should make a quick check of how the
client's using the deferral code (since it uses the cache code for
idmapping now), but I suspect it's very simple (it should only ever need
to do idmapping in process context).

--b.


* Re: [PATCH 1.5/6] Fix race in new request delay code.
  2010-08-29 23:36     ` Neil Brown
  2010-09-01 11:31       ` J. Bruce Fields
@ 2010-09-21  8:35       ` Neil Brown
  2010-09-22  2:15         ` J. Bruce Fields
  1 sibling, 1 reply; 19+ messages in thread
From: Neil Brown @ 2010-09-21  8:35 UTC (permalink / raw)
  To: Neil Brown; +Cc: J. Bruce Fields, linux-nfs

On Mon, 30 Aug 2010 09:36:08 +1000
Neil Brown <neilb@suse.de> wrote:

> On Thu, 26 Aug 2010 17:08:00 -0400
> "J. Bruce Fields" <bfields@fieldses.org> wrote:
> 
> > On Tue, Aug 17, 2010 at 03:15:09PM +1000, NeilBrown wrote:
> > > If the 'wait_for_completion_interruptible_timeout' completes due
> > > to interrupt or timeout, just before cache_revisit_request gets around
> > > to calling ->revisit and thus the completion, we have a race with bad
> > > consequences.
> > > 
> > > cache_defer_req will see that sleeper.handle.hash is empty (because
> > > cache_revisit_request has already done the list_del_init), and that
> > > CACHE_PENDING is clear, and so will exit that function leaving any
> > > local variables such as 'sleeper' undefined.
> > > 
> > > Meanwhile cache_revisit_request could delete sleeper.handle.recent
> > > from the 'pending' list, and call sleeper.handle.revisit, any of which
> > > could have new garbage values and so could trigger a BUG.
> > 
> > Thanks, this looks correct to me!
> > 
> > I wish it were simpler, but don't see how right now.
> 
> I think the way to make it simpler is to get rid of the deferral altogether.
> The more I think about it the less I like it :-)

I was in the process of doing this "getting rid of deferral" when I
discovered that lockd now uses deferral too (and has done for 4 years! I am
out of touch!!).  This is for NFS exporting cluster filesystems where 'lock'
might take a while, but returns a 'grant'.

As lockd is single-threaded we cannot just let it wait for the grant
in-process.

So I guess I cannot rip all that code out after all :-(

NeilBrown




> 
> The deferral code effectively gives you DFR_MAX extra virtual threads.  They
> each are processing one request but have (supposedly) much lower overhead
> than creating real threads.  So there are two groups of threads:
>  - those that can wait a longish time for a reply to an upcall
>  - those that only wait for IO.
> 
> This distinction could well still be useful as we don't really want all
> threads to be tied up waiting on upcalls so that no real work can be done.
> However we don't really need the deferral mechanism to achieve this.
> 
> Instead we impose a limit on the number of threads that can be waiting on
> a completion (in cache_wait_req in your revised code).
> If the number of such threads ever reaches - say - half the total, then we
> start dropping requests and closing TCP connections (or start new threads if
> we ever work out a good way to dynamically manage the thread pool).
> 
> So I would suggest:
>   - putting in a counter and limiting the number of waiting threads to
>     half the pool size
>   - removing all the deferral code
>   - looking at cleaning up what is left to make it more readable.
> 
> ???
> 
> NeilBrown

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 3/6] sunrpc: close connection when a request is irretrievably lost.
  2010-08-12  7:04 ` [PATCH 3/6] sunrpc: close connection when a request is irretrievably lost NeilBrown
@ 2010-09-21 20:53   ` J. Bruce Fields
  2010-09-21 23:37     ` Neil Brown
  0 siblings, 1 reply; 19+ messages in thread
From: J. Bruce Fields @ 2010-09-21 20:53 UTC (permalink / raw)
  To: NeilBrown; +Cc: linux-nfs

Apologies for the delay getting to the rest of these.

On Thu, Aug 12, 2010 at 05:04:07PM +1000, NeilBrown wrote:
> If we drop a request in the sunrpc layer, either due to a kmalloc failure,
> or due to a cache miss when we could not queue the request for later
> replay, then close the connection to encourage the client to retry sooner.
> 
> Note that if the drop happens in the NFS layer, NFSERR_JUKEBOX
> (aka NFS4ERR_DELAY) is returned to guide the client concerning
> replay.

Looks fine, but:

> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> index d9017d6..6359c42 100644
> --- a/net/sunrpc/svc.c
> +++ b/net/sunrpc/svc.c
> @@ -1055,6 +1055,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
>  		goto err_bad;
>  	case SVC_DENIED:
>  		goto err_bad_auth;
> +	case SVC_CLOSE:
> +		if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
> +			svc_close_xprt(rqstp->rq_xprt);

There are also dropit's later in svc_process_common when xdr encoding
fails.  I wonder if we should close there?

Well, it's an odd case.  Seems like it should almost be declared a
programming error and made a WARN().
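
Something like this, perhaps - hypothetical only, not applied; it just
reuses the usual kxdrproc_t calling convention for pc_encode:

	if (*statp == rpc_success && procp->pc_encode &&
	    !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len,
			      rqstp->rq_resp)) {
		/* An encode failure is a bug in the program, not a
		 * normal event: make it loud instead of dropping
		 * the request silently. */
		WARN_ONCE(1, "svc: failed to encode reply\n");
		*statp = rpc_system_err;
	}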

Applying as is.--b.

>  	case SVC_DROP:
>  		goto dropit;
>  	case SVC_COMPLETE:
> diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
> index 2073116..e91b550 100644
> --- a/net/sunrpc/svcauth_unix.c
> +++ b/net/sunrpc/svcauth_unix.c
> @@ -674,6 +674,8 @@ static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
>  	switch (ret) {
>  	case -ENOENT:
>  		return ERR_PTR(-ENOENT);
> +	case -ETIMEDOUT:
> +		return ERR_PTR(-ESHUTDOWN);
>  	case 0:
>  		gi = get_group_info(ug->gi);
>  		cache_put(&ug->h, &unix_gid_cache);
> @@ -720,8 +722,9 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
>  	switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
>  		default:
>  			BUG();
> -		case -EAGAIN:
>  		case -ETIMEDOUT:
> +			return SVC_CLOSE;
> +		case -EAGAIN:
>  			return SVC_DROP;
>  		case -ENOENT:
>  			return SVC_DENIED;
> @@ -736,6 +739,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
>  	switch (PTR_ERR(gi)) {
>  	case -EAGAIN:
>  		return SVC_DROP;
> +	case -ESHUTDOWN:
> +		return SVC_CLOSE;
>  	case -ENOENT:
>  		break;
>  	default:
> @@ -776,7 +781,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
>  	cred->cr_gid = (gid_t) -1;
>  	cred->cr_group_info = groups_alloc(0);
>  	if (cred->cr_group_info == NULL)
> -		return SVC_DROP; /* kmalloc failure - client must retry */
> +		return SVC_CLOSE; /* kmalloc failure - client must retry */
>  
>  	/* Put NULL verifier */
>  	svc_putnl(resv, RPC_AUTH_NULL);
> @@ -840,7 +845,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
>  		goto badcred;
>  	cred->cr_group_info = groups_alloc(slen);
>  	if (cred->cr_group_info == NULL)
> -		return SVC_DROP;
> +		return SVC_CLOSE;
>  	for (i = 0; i < slen; i++)
>  		GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
>  	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
> 
> 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 4/6] nfsd: disable deferral for NFSv4
       [not found]   ` <20100812070407.11459.2929.stgit-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2010-09-21 21:01     ` J. Bruce Fields
  0 siblings, 0 replies; 19+ messages in thread
From: J. Bruce Fields @ 2010-09-21 21:01 UTC (permalink / raw)
  To: NeilBrown; +Cc: linux-nfs

On Thu, Aug 12, 2010 at 05:04:07PM +1000, NeilBrown wrote:
> Now that a slight delay in getting a reply to an upcall doesn't
> require deferring of requests, disable request deferral for all NFSv4
> requests - the concept doesn't really fit with the v4 model.

Applied with a minor comment fix.

--b.

> 
> Signed-off-by: NeilBrown <neilb@suse.de>
> ---
>  fs/nfsd/nfs4proc.c |    4 ++--
>  1 files changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
> index 59ec449..fca3621 100644
> --- a/fs/nfsd/nfs4proc.c
> +++ b/fs/nfsd/nfs4proc.c
> @@ -1031,8 +1031,8 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
>  	resp->cstate.session = NULL;
>  	fh_init(&resp->cstate.current_fh, NFS4_FHSIZE);
>  	fh_init(&resp->cstate.save_fh, NFS4_FHSIZE);
> -	/* Use the deferral mechanism only for NFSv4.0 compounds */
> -	rqstp->rq_usedeferral = (args->minorversion == 0);
> +	/* Don't use the deferral mechanism for NFSv4. */
> +	rqstp->rq_usedeferral = 0;
>  
>  	/*
>  	 * According to RFC3010, this takes precedence over all other errors.
> 
> 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 2/6] nfsd/idmap: drop special request deferal in favour of improved default.
       [not found]   ` <20100812070406.11459.89468.stgit-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
@ 2010-09-21 21:08     ` J. Bruce Fields
  0 siblings, 0 replies; 19+ messages in thread
From: J. Bruce Fields @ 2010-09-21 21:08 UTC (permalink / raw)
  To: NeilBrown; +Cc: linux-nfs

On Thu, Aug 12, 2010 at 05:04:06PM +1000, NeilBrown wrote:
> The idmap code manages request deferral by waiting for a reply from
> userspace rather than putting the NFS request on a queue to be retried
> from the start.
> Now that the common deferral code does this there is no need for the
> special code in idmap.

Applied (with a minor fixup to get it to apply after the
seconds-since-boot change).

> Signed-off-by: NeilBrown <neilb@suse.de>
> ---
>  fs/nfsd/nfs4idmap.c |  105 +++++----------------------------------------------
>  1 files changed, 11 insertions(+), 94 deletions(-)

And yay for that diffstat....

--b.

> 
> diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
> index c78dbf4..f0695e8 100644
> --- a/fs/nfsd/nfs4idmap.c
> +++ b/fs/nfsd/nfs4idmap.c
> @@ -482,109 +482,26 @@ nfsd_idmap_shutdown(void)
>  	cache_unregister(&nametoid_cache);
>  }
>  
> -/*
> - * Deferred request handling
> - */
> -
> -struct idmap_defer_req {
> -       struct cache_req		req;
> -       struct cache_deferred_req deferred_req;
> -       wait_queue_head_t	waitq;
> -       atomic_t			count;
> -};
> -
> -static inline void
> -put_mdr(struct idmap_defer_req *mdr)
> -{
> -	if (atomic_dec_and_test(&mdr->count))
> -		kfree(mdr);
> -}
> -
> -static inline void
> -get_mdr(struct idmap_defer_req *mdr)
> -{
> -	atomic_inc(&mdr->count);
> -}
> -
> -static void
> -idmap_revisit(struct cache_deferred_req *dreq, int toomany)
> -{
> -	struct idmap_defer_req *mdr =
> -		container_of(dreq, struct idmap_defer_req, deferred_req);
> -
> -	wake_up(&mdr->waitq);
> -	put_mdr(mdr);
> -}
> -
> -static struct cache_deferred_req *
> -idmap_defer(struct cache_req *req)
> -{
> -	struct idmap_defer_req *mdr =
> -		container_of(req, struct idmap_defer_req, req);
> -
> -	mdr->deferred_req.revisit = idmap_revisit;
> -	get_mdr(mdr);
> -	return (&mdr->deferred_req);
> -}
> -
> -static inline int
> -do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key,
> -		struct cache_detail *detail, struct ent **item,
> -		struct idmap_defer_req *mdr)
> -{
> -	*item = lookup_fn(key);
> -	if (!*item)
> -		return -ENOMEM;
> -	return cache_check(detail, &(*item)->h, &mdr->req);
> -}
> -
> -static inline int
> -do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *),
> -			struct ent *key, struct cache_detail *detail,
> -			struct ent **item)
> -{
> -	int ret = -ENOMEM;
> -
> -	*item = lookup_fn(key);
> -	if (!*item)
> -		goto out_err;
> -	ret = -ETIMEDOUT;
> -	if (!test_bit(CACHE_VALID, &(*item)->h.flags)
> -			|| (*item)->h.expiry_time < get_seconds()
> -			|| detail->flush_time > (*item)->h.last_refresh)
> -		goto out_put;
> -	ret = -ENOENT;
> -	if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
> -		goto out_put;
> -	return 0;
> -out_put:
> -	cache_put(&(*item)->h, detail);
> -out_err:
> -	*item = NULL;
> -	return ret;
> -}
> -
>  static int
>  idmap_lookup(struct svc_rqst *rqstp,
>  		struct ent *(*lookup_fn)(struct ent *), struct ent *key,
>  		struct cache_detail *detail, struct ent **item)
>  {
> -	struct idmap_defer_req *mdr;
>  	int ret;
>  
> -	mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
> -	if (!mdr)
> +	*item = lookup_fn(key);
> +	if (!*item)
>  		return -ENOMEM;
> -	atomic_set(&mdr->count, 1);
> -	init_waitqueue_head(&mdr->waitq);
> -	mdr->req.defer = idmap_defer;
> -	ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr);
> -	if (ret == -EAGAIN) {
> -		wait_event_interruptible_timeout(mdr->waitq,
> -			test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
> -		ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item);
> + retry:
> +	ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);
> +
> +	if (ret == -ETIMEDOUT) {
> +		struct ent *prev_item = *item;
> +		*item = lookup_fn(key);
> +		if (*item != prev_item)
> +			goto retry;
> +		cache_put(&(*item)->h, detail);
>  	}
> -	put_mdr(mdr);
>  	return ret;
>  }
>  
> 
> 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 3/6] sunrpc: close connection when a request is irretrievably lost.
  2010-09-21 20:53   ` J. Bruce Fields
@ 2010-09-21 23:37     ` Neil Brown
  2010-09-22  2:13       ` J. Bruce Fields
  0 siblings, 1 reply; 19+ messages in thread
From: Neil Brown @ 2010-09-21 23:37 UTC (permalink / raw)
  To: J. Bruce Fields; +Cc: linux-nfs

On Tue, 21 Sep 2010 16:53:43 -0400
"J. Bruce Fields" <bfields@fieldses.org> wrote:

> Apologies for the delay getting to the rest of these.
> 
> On Thu, Aug 12, 2010 at 05:04:07PM +1000, NeilBrown wrote:
> > If we drop a request in the sunrpc layer, either due to a kmalloc failure,
> > or due to a cache miss when we could not queue the request for later
> > replay, then close the connection to encourage the client to retry sooner.
> > 
> > Note that if the drop happens in the NFS layer, NFSERR_JUKEBOX
> > (aka NFS4ERR_DELAY) is returned to guide the client concerning
> > replay.
> 
> Looks fine, but:
> 
> > diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> > index d9017d6..6359c42 100644
> > --- a/net/sunrpc/svc.c
> > +++ b/net/sunrpc/svc.c
> > @@ -1055,6 +1055,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
> >  		goto err_bad;
> >  	case SVC_DENIED:
> >  		goto err_bad_auth;
> > +	case SVC_CLOSE:
> > +		if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
> > +			svc_close_xprt(rqstp->rq_xprt);
> 
> There are also dropit's later in svc_process_common when xdr encoding
> fails.  I wonder if we should close there?
> 
> Well, it's an odd case.  Seems like it should almost be declared a
> programming error and made a WARN().
> 
> Applying as is.--b.

Thanks, I was wondering what had happened to these...

I cannot find them in your git though - not pushed yet, or am I looking in
the wrong branch (for-2.6.37 and nfsd-next seem the obvious choices).

For the other 'dropit' cases I think the svc_authorise failure is most likely
to be interesting.  That can only fail if svcauth_gss_wrap_resp_* fails.  I
don't really know what that means ... maybe we should close the
connection in that case.
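
Concretely, something like this in svc_process_common() - untested,
and just mirroring the XPT_TEMP check from the hunk you applied:

 sendit:
	if (svc_authorise(rqstp)) {
		/* gss wrapping of the reply failed: the reply is
		 * irretrievably lost, so encourage a prompt retry
		 * rather than leaving the client to time out. */
		if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
			svc_close_xprt(rqstp->rq_xprt);
		goto dropit;
	}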

NeilBrown


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 3/6] sunrpc: close connection when a request is irretrievably lost.
  2010-09-21 23:37     ` Neil Brown
@ 2010-09-22  2:13       ` J. Bruce Fields
  0 siblings, 0 replies; 19+ messages in thread
From: J. Bruce Fields @ 2010-09-22  2:13 UTC (permalink / raw)
  To: Neil Brown; +Cc: linux-nfs

On Wed, Sep 22, 2010 at 09:37:25AM +1000, Neil Brown wrote:
> On Tue, 21 Sep 2010 16:53:43 -0400
> "J. Bruce Fields" <bfields@fieldses.org> wrote:
> 
> > Apologies for the delay getting to the rest of these.
> > 
> > On Thu, Aug 12, 2010 at 05:04:07PM +1000, NeilBrown wrote:
> > > If we drop a request in the sunrpc layer, either due to a kmalloc failure,
> > > or due to a cache miss when we could not queue the request for later
> > > replay, then close the connection to encourage the client to retry sooner.
> > > 
> > > Note that if the drop happens in the NFS layer, NFSERR_JUKEBOX
> > > (aka NFS4ERR_DELAY) is returned to guide the client concerning
> > > replay.
> > 
> > Looks fine, but:
> > 
> > > diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> > > index d9017d6..6359c42 100644
> > > --- a/net/sunrpc/svc.c
> > > +++ b/net/sunrpc/svc.c
> > > @@ -1055,6 +1055,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
> > >  		goto err_bad;
> > >  	case SVC_DENIED:
> > >  		goto err_bad_auth;
> > > +	case SVC_CLOSE:
> > > +		if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
> > > +			svc_close_xprt(rqstp->rq_xprt);
> > 
> > There are also dropit's later in svc_process_common when xdr encoding
> > fails.  I wonder if we should close there?
> > 
> > Well, it's an odd case.  Seems like it should almost be declared a
> > programming error and made a WARN().
> > 
> > Applying as is.--b.
> 
> Thanks, I was wondering what had happened to these...
> 
> I cannot find them in your git though - not pushed yet, or am I looking in
> the wrong branch (for-2.6.37 and nfsd-next seem the obvious choices).

Yeah, that's the right place; apologies.  I just tend to say "applied"
when I add them to my local tree, but then I wait until at least some
tests have run before actually pushing them out.

> For the other 'dropit' cases I think the svc_authorise failure is most likely
> to be interesting.  That can only fail if svcauth_gss_wrap_resp_* fails.  I
> don't really know what that means ... maybe we should close the
> connection in that case.

If dropping the connection is our way of communicating "I've lost one of
your requests, you're never going to see the reply until you retry",
then perhaps the only time we should drop is when we've deferred.

(In which case, maybe s/SVC_DROP/SVC_DEFERRED/.)

Anyway, they're all pushed out now.  The last one required more fixing
up (thanks to my gratuitous cleanup patch), but I think it's right....

--b.

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 1.5/6] Fix race in new request delay code.
  2010-09-21  8:35       ` Neil Brown
@ 2010-09-22  2:15         ` J. Bruce Fields
  0 siblings, 0 replies; 19+ messages in thread
From: J. Bruce Fields @ 2010-09-22  2:15 UTC (permalink / raw)
  To: Neil Brown; +Cc: linux-nfs

On Tue, Sep 21, 2010 at 06:35:21PM +1000, Neil Brown wrote:
> On Mon, 30 Aug 2010 09:36:08 +1000
> Neil Brown <neilb@suse.de> wrote:
> 
> > On Thu, 26 Aug 2010 17:08:00 -0400
> > "J. Bruce Fields" <bfields@fieldses.org> wrote:
> > 
> > > On Tue, Aug 17, 2010 at 03:15:09PM +1000, NeilBrown wrote:
> > > > If the 'wait_for_completion_interruptible_timeout' completes due
> > > > to an interrupt or timeout just before cache_revisit_request gets around
> > > > to calling ->revisit and thus the completion, we have a race with bad
> > > > consequences.
> > > > 
> > > > cache_defer_req will see that sleeper.handle.hash is empty (because
> > > > cache_revisit_request has already done the list_del_init), and that
> > > > CACHE_PENDING is clear, and so will exit that function, leaving
> > > > local variables such as 'sleeper' undefined.
> > > > 
> > > > Meanwhile cache_revisit_request could delete sleeper.handle.recent
> > > > from the 'pending' list, and call sleeper.handle.revisit, either of which
> > > > could by then contain garbage values and so trigger a BUG.
> > > 
> > > Thanks, this looks correct to me!
> > > 
> > > I wish it were simpler, but don't see how right now.
> > 
> > I think the way to make it simpler is to get rid of the deferral altogether.
> > The more I think about it the less I like it :-)
> 
> I was in the process of doing this "getting rid of deferral" when I
> discovered that lockd now uses deferral too (and has done for 4 years! I am
> out of touch!!).  This is for NFS-exported cluster filesystems, where a 'lock'
> might take a while but eventually completes with a 'grant'.
> 
> As lockd is single-threaded we cannot just let it wait for the grant
> in-process.
> 
> So I guess I cannot rip all that code out after all :-(

Yeah, I'd forgotten about that.  Eventually perhaps we can fix lockd....

--b.

^ permalink raw reply	[flat|nested] 19+ messages in thread

end of thread, other threads:[~2010-09-22  2:16 UTC | newest]

Thread overview: 19+ messages
2010-08-12  7:04 [PATCH 0/6] Cache deferral improvements - try N+2 NeilBrown
2010-08-12  7:04 ` [PATCH 1/6] sunrpc/cache: allow threads to block while waiting for cache update NeilBrown
2010-08-12  7:04 ` [PATCH 2/6] nfsd/idmap: drop special request deferal in favour of improved default NeilBrown
     [not found]   ` <20100812070406.11459.89468.stgit-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2010-09-21 21:08     ` J. Bruce Fields
2010-08-12  7:04 ` [PATCH 3/6] sunrpc: close connection when a request is irretrievably lost NeilBrown
2010-09-21 20:53   ` J. Bruce Fields
2010-09-21 23:37     ` Neil Brown
2010-09-22  2:13       ` J. Bruce Fields
2010-08-12  7:04 ` [PATCH 5/6] svcauth_gss: replace a trivial 'switch' with an 'if' NeilBrown
2010-08-12  7:04 ` [PATCH 4/6] nfsd: disable deferral for NFSv4 NeilBrown
     [not found]   ` <20100812070407.11459.2929.stgit-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2010-09-21 21:01     ` J. Bruce Fields
2010-08-12  7:04 ` [PATCH 6/6] sunrpc/cache: change deferred-request hash table to use hlist NeilBrown
     [not found] ` <20100812065722.11459.18978.stgit-bi+AKbBUZKY6gyzm1THtWbp2dZbC/Bob@public.gmane.org>
2010-08-12 12:03   ` [PATCH 0/6] Cache deferral improvements - try N+2 J. Bruce Fields
2010-08-17  5:15 ` [PATCH 1.5/6] Fix race in new request delay code NeilBrown
2010-08-26 21:08   ` J. Bruce Fields
2010-08-29 23:36     ` Neil Brown
2010-09-01 11:31       ` J. Bruce Fields
2010-09-21  8:35       ` Neil Brown
2010-09-22  2:15         ` J. Bruce Fields
