* [PATCH 0/4] Refactor data structures to support NFSv4 migration
@ 2010-12-23 21:54 Chuck Lever
  2010-12-23 21:54 ` [PATCH 1/4] NFS: Allow walking nfs_client.cl_superblocks list outside client.c Chuck Lever
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: Chuck Lever @ 2010-12-23 21:54 UTC (permalink / raw)
  To: trond.myklebust; +Cc: linux-nfs

Trond-

Here are four patches that move fields related to NFSv4 state from the
nfs_client struct to the nfs_server struct, in order to facilitate the
eventual implementation of NFSv4 migration.

This series should address all recent review comments.  It passes my
simple tests here:  the full cthon suite passes over NFSv4 against an
OpenSolaris 2009.06 NFS server, and a full -j3 kernel build on NFSv4
completes without hangs or lockdep splats on a two-way client.  The
kernel build usually generates some delegation activity with this
Solaris server.

---

Chuck Lever (4):
      NFS: Move cl_delegations to the nfs_server struct
      NFS: Introduce nfs_detach_delegations()
      NFS: Move cl_state_owners and related fields to the nfs_server struct
      NFS: Allow walking nfs_client.cl_superblocks list outside client.c


 fs/nfs/client.c           |   48 +++---
 fs/nfs/delegation.c       |  362 ++++++++++++++++++++++++++++++++-------------
 fs/nfs/delegation.h       |    1 
 fs/nfs/nfs4_fs.h          |    2 
 fs/nfs/nfs4renewd.c       |   11 +
 fs/nfs/nfs4state.c        |  239 +++++++++++++++++++++---------
 include/linux/nfs_fs_sb.h |   11 +
 7 files changed, 474 insertions(+), 200 deletions(-)

-- 
Chuck Lever


* [PATCH 1/4] NFS: Allow walking nfs_client.cl_superblocks list outside client.c
  2010-12-23 21:54 [PATCH 0/4] Refactor data structures to support NFSv4 migration Chuck Lever
@ 2010-12-23 21:54 ` Chuck Lever
  2010-12-23 21:54 ` [PATCH 2/4] NFS: Move cl_state_owners and related fields to the nfs_server struct Chuck Lever
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 11+ messages in thread
From: Chuck Lever @ 2010-12-23 21:54 UTC (permalink / raw)
  To: trond.myklebust; +Cc: linux-nfs

We're about to move some fields from struct nfs_client to struct
nfs_server.  There is a many-to-one relationship between nfs_servers
and nfs_clients.  Once these fields live in the nfs_server struct,
code that wants to visit all of the data owned by one nfs_client will
need to walk each nfs_server on that nfs_client's cl_superblocks list.

So that these walks cannot race with changes to the cl_superblocks
list, protect the list with RCU.
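
For illustration only (this helper is not part of the patch), here is a
minimal sketch of what such a walk might look like once the list is
RCU-protected.  The per-server callback, visit_one_server(), is
hypothetical:

static void example_walk_superblocks(struct nfs_client *clp)
{
	struct nfs_server *server;

	/*
	 * Readers traverse the list under rcu_read_lock(); writers
	 * (nfs_server_insert_lists/nfs_server_remove_lists below)
	 * still serialize updates with nfs_client_lock.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		visit_one_server(server);	/* hypothetical; must not sleep */
	rcu_read_unlock();
}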

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c     |   44 +++++++++++++++++++++++++-------------------
 fs/nfs/nfs4renewd.c |    9 +++++++--
 2 files changed, 32 insertions(+), 21 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 0870d0d..05e2ee2 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -988,6 +988,27 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve
 	target->options = source->options;
 }
 
+static void nfs_server_insert_lists(struct nfs_server *server)
+{
+	struct nfs_client *clp = server->nfs_client;
+
+	spin_lock(&nfs_client_lock);
+	list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
+	list_add_tail(&server->master_link, &nfs_volume_list);
+	spin_unlock(&nfs_client_lock);
+
+}
+
+static void nfs_server_remove_lists(struct nfs_server *server)
+{
+	spin_lock(&nfs_client_lock);
+	list_del_rcu(&server->client_link);
+	list_del(&server->master_link);
+	spin_unlock(&nfs_client_lock);
+
+	synchronize_rcu();
+}
+
 /*
  * Allocate and initialise a server record
  */
@@ -1029,11 +1050,8 @@ void nfs_free_server(struct nfs_server *server)
 {
 	dprintk("--> nfs_free_server()\n");
 
+	nfs_server_remove_lists(server);
 	unset_pnfs_layoutdriver(server);
-	spin_lock(&nfs_client_lock);
-	list_del(&server->client_link);
-	list_del(&server->master_link);
-	spin_unlock(&nfs_client_lock);
 
 	if (server->destroy != NULL)
 		server->destroy(server);
@@ -1108,11 +1126,7 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
 		(unsigned long long) server->fsid.major,
 		(unsigned long long) server->fsid.minor);
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 	nfs_free_fattr(fattr);
 	return server;
@@ -1342,11 +1356,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
 	if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
 		server->namelen = NFS4_MAXNAMLEN;
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 out:
 	nfs_free_fattr(fattr);
@@ -1551,11 +1561,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
 	if (error < 0)
 		goto out_free_server;
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 
 	nfs_free_fattr(fattr_fsinfo);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 72b6c58..cde5650 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -63,9 +63,14 @@ nfs4_renew_state(struct work_struct *work)
 
 	ops = clp->cl_mvops->state_renewal_ops;
 	dprintk("%s: start\n", __func__);
-	/* Are there any active superblocks? */
-	if (list_empty(&clp->cl_superblocks))
+
+	rcu_read_lock();
+	if (list_empty(&clp->cl_superblocks)) {
+		rcu_read_unlock();
 		goto out;
+	}
+	rcu_read_unlock();
+
 	spin_lock(&clp->cl_lock);
 	lease = clp->cl_lease_time;
 	last = clp->cl_last_renewal;



* [PATCH 2/4] NFS: Move cl_state_owners and related fields to the nfs_server struct
  2010-12-23 21:54 [PATCH 0/4] Refactor data structures to support NFSv4 migration Chuck Lever
  2010-12-23 21:54 ` [PATCH 1/4] NFS: Allow walking nfs_client.cl_superblocks list outside client.c Chuck Lever
@ 2010-12-23 21:54 ` Chuck Lever
  2010-12-23 22:04   ` Trond Myklebust
  2010-12-23 21:54 ` [PATCH 3/4] NFS: Introduce nfs_detach_delegations() Chuck Lever
  2010-12-23 21:54 ` [PATCH 4/4] NFS: Move cl_delegations to the nfs_server struct Chuck Lever
  3 siblings, 1 reply; 11+ messages in thread
From: Chuck Lever @ 2010-12-23 21:54 UTC (permalink / raw)
  To: trond.myklebust; +Cc: linux-nfs

NFSv4 migration needs to reassociate state owners from the source to
the destination nfs_server data structures.  To make that easier, move
the cl_state_owners field to the nfs_server struct.  cl_openowner_id
and cl_lockowner_id accompany this move, as they are used in
conjunction with cl_state_owners.

The cl_lock field in the parent nfs_client continues to protect all
three of these fields.
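
A minimal sketch, not part of this patch, of the resulting walk shape:
each server's state_owners tree is traversed while holding the parent
nfs_client's cl_lock, and the cl_superblocks list itself is walked
under RCU as introduced in patch 1/4.  The lock nesting below mirrors
nfs4_state_mark_reclaim_helper() in this patch; the function name is
hypothetical:

static void example_visit_state_owners(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct nfs_server *server;
	struct rb_node *pos;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		for (pos = rb_first(&server->state_owners);
		     pos != NULL;
		     pos = rb_next(pos)) {
			sp = rb_entry(pos, struct nfs4_state_owner,
				      so_server_node);
			if (list_empty(&sp->so_states))
				continue;
			/* visit sp here, still under cl_lock */
		}
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);
}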

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/nfs4_fs.h          |    2 
 fs/nfs/nfs4state.c        |  239 ++++++++++++++++++++++++++++++++-------------
 include/linux/nfs_fs_sb.h |    9 +-
 3 files changed, 177 insertions(+), 73 deletions(-)

diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 7a6eecf..fa5c5d3 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -109,7 +109,7 @@ struct nfs_unique_id {
 struct nfs4_state_owner {
 	struct nfs_unique_id so_owner_id;
 	struct nfs_server    *so_server;
-	struct rb_node	     so_client_node;
+	struct rb_node	     so_server_node;
 
 	struct rpc_cred	     *so_cred;	 /* Associated cred */
 
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f575a31..fbcfe72 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -105,14 +105,17 @@ static void nfs4_clear_machine_cred(struct nfs_client *clp)
 		put_rpccred(cred);
 }
 
-struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+static struct rpc_cred *
+nfs4_get_renew_cred_server_locked(struct nfs_server *server)
 {
+	struct rpc_cred *cred = NULL;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
-	struct rpc_cred *cred = NULL;
 
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		if (list_empty(&sp->so_states))
 			continue;
 		cred = get_rpccred(sp->so_cred);
@@ -121,6 +124,28 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
 	return cred;
 }
 
+/**
+ * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ * Caller must hold clp->cl_lock.
+ */
+struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+{
+	struct rpc_cred *cred = NULL;
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		cred = nfs4_get_renew_cred_server_locked(server);
+		if (cred != NULL)
+			break;
+	}
+	rcu_read_unlock();
+	return cred;
+}
+
 #if defined(CONFIG_NFS_V4_1)
 
 static int nfs41_setup_state_renewal(struct nfs_client *clp)
@@ -210,21 +235,45 @@ struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
 
 #endif /* CONFIG_NFS_V4_1 */
 
-struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+static struct rpc_cred *
+nfs4_get_setclientid_cred_server_locked(struct nfs_server *server)
 {
+	struct rpc_cred *cred = NULL;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
+
+	pos = rb_first(&server->state_owners);
+	if (pos != NULL) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
+		cred = get_rpccred(sp->so_cred);
+	}
+	return cred;
+}
+
+/**
+ * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ */
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+{
+	struct nfs_server *server;
 	struct rpc_cred *cred;
 
 	spin_lock(&clp->cl_lock);
 	cred = nfs4_get_machine_cred_locked(clp);
 	if (cred != NULL)
 		goto out;
-	pos = rb_first(&clp->cl_state_owners);
-	if (pos != NULL) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
-		cred = get_rpccred(sp->so_cred);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		cred = nfs4_get_setclientid_cred_server_locked(server);
+		if (cred != NULL)
+			break;
 	}
+	rcu_read_unlock();
+
 out:
 	spin_unlock(&clp->cl_lock);
 	return cred;
@@ -286,16 +335,15 @@ static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
 }
 
 static struct nfs4_state_owner *
-nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
+nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
 {
-	struct nfs_client *clp = server->nfs_client;
-	struct rb_node **p = &clp->cl_state_owners.rb_node,
+	struct rb_node **p = &server->state_owners.rb_node,
 		       *parent = NULL;
 	struct nfs4_state_owner *sp, *res = NULL;
 
 	while (*p != NULL) {
 		parent = *p;
-		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
 
 		if (server < sp->so_server) {
 			p = &parent->rb_left;
@@ -319,24 +367,17 @@ nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
 }
 
 static struct nfs4_state_owner *
-nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
+nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
 {
-	struct rb_node **p = &clp->cl_state_owners.rb_node,
+	struct nfs_server *server = new->so_server;
+	struct rb_node **p = &server->state_owners.rb_node,
 		       *parent = NULL;
 	struct nfs4_state_owner *sp;
 
 	while (*p != NULL) {
 		parent = *p;
-		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
 
-		if (new->so_server < sp->so_server) {
-			p = &parent->rb_left;
-			continue;
-		}
-		if (new->so_server > sp->so_server) {
-			p = &parent->rb_right;
-			continue;
-		}
 		if (new->so_cred < sp->so_cred)
 			p = &parent->rb_left;
 		else if (new->so_cred > sp->so_cred)
@@ -346,18 +387,20 @@ nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
 			return sp;
 		}
 	}
-	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
-	rb_link_node(&new->so_client_node, parent, p);
-	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
+	nfs_alloc_unique_id(&server->openowner_id, &new->so_owner_id, 1, 64);
+	rb_link_node(&new->so_server_node, parent, p);
+	rb_insert_color(&new->so_server_node, &server->state_owners);
 	return new;
 }
 
 static void
-nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
+nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
 {
-	if (!RB_EMPTY_NODE(&sp->so_client_node))
-		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
-	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
+	struct nfs_server *server = sp->so_server;
+
+	if (!RB_EMPTY_NODE(&sp->so_server_node))
+		rb_erase(&sp->so_server_node, &server->state_owners);
+	nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id);
 }
 
 /*
@@ -386,23 +429,32 @@ nfs4_alloc_state_owner(void)
 static void
 nfs4_drop_state_owner(struct nfs4_state_owner *sp)
 {
-	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
-		struct nfs_client *clp = sp->so_server->nfs_client;
+	if (!RB_EMPTY_NODE(&sp->so_server_node)) {
+		struct nfs_server *server = sp->so_server;
+		struct nfs_client *clp = server->nfs_client;
 
 		spin_lock(&clp->cl_lock);
-		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
-		RB_CLEAR_NODE(&sp->so_client_node);
+		rb_erase(&sp->so_server_node, &server->state_owners);
+		RB_CLEAR_NODE(&sp->so_server_node);
 		spin_unlock(&clp->cl_lock);
 	}
 }
 
-struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
+/**
+ * nfs4_get_state_owner - Look up a state owner given a credential
+ * @server: nfs_server to search
+ * @cred: RPC credential to match
+ *
+ * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
+ */
+struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
+					      struct rpc_cred *cred)
 {
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp, *new;
 
 	spin_lock(&clp->cl_lock);
-	sp = nfs4_find_state_owner(server, cred);
+	sp = nfs4_find_state_owner_locked(server, cred);
 	spin_unlock(&clp->cl_lock);
 	if (sp != NULL)
 		return sp;
@@ -412,7 +464,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
 	new->so_server = server;
 	new->so_cred = cred;
 	spin_lock(&clp->cl_lock);
-	sp = nfs4_insert_state_owner(clp, new);
+	sp = nfs4_insert_state_owner_locked(new);
 	spin_unlock(&clp->cl_lock);
 	if (sp == new)
 		get_rpccred(cred);
@@ -423,6 +475,11 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
 	return sp;
 }
 
+/**
+ * nfs4_put_state_owner - Release a nfs4_state_owner
+ * @sp: state owner data to release
+ *
+ */
 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 {
 	struct nfs_client *clp = sp->so_server->nfs_client;
@@ -430,7 +487,7 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 
 	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
 		return;
-	nfs4_remove_state_owner(clp, sp);
+	nfs4_remove_state_owner_locked(sp);
 	spin_unlock(&clp->cl_lock);
 	rpc_destroy_wait_queue(&sp->so_sequence.wait);
 	put_rpccred(cred);
@@ -633,7 +690,8 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_p
 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
 {
 	struct nfs4_lock_state *lsp;
-	struct nfs_client *clp = state->owner->so_server->nfs_client;
+	struct nfs_server *server = state->owner->so_server;
+	struct nfs_client *clp = server->nfs_client;
 
 	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
 	if (lsp == NULL)
@@ -657,7 +715,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 		return NULL;
 	}
 	spin_lock(&clp->cl_lock);
-	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
+	nfs_alloc_unique_id(&server->lockowner_id, &lsp->ls_id, 1, 64);
 	spin_unlock(&clp->cl_lock);
 	INIT_LIST_HEAD(&lsp->ls_locks);
 	return lsp;
@@ -665,10 +723,11 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 
 static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
 {
-	struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;
+	struct nfs_server *server = lsp->ls_state->owner->so_server;
+	struct nfs_client *clp = server->nfs_client;
 
 	spin_lock(&clp->cl_lock);
-	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
+	nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id);
 	spin_unlock(&clp->cl_lock);
 	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
 	kfree(lsp);
@@ -1114,25 +1173,40 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
 	}
 }
 
-static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+static void nfs4_reset_seqids_locked(struct nfs_server *server,
+	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
 {
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
 	struct nfs4_state *state;
 
-	/* Reset all sequence ids to zero */
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		sp->so_seqid.flags = 0;
 		spin_lock(&sp->so_lock);
 		list_for_each_entry(state, &sp->so_states, open_states) {
-			if (mark_reclaim(clp, state))
+			if (mark_reclaim(server->nfs_client, state))
 				nfs4_clear_open_state(state);
 		}
 		spin_unlock(&sp->so_lock);
 	}
 }
 
+static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
+	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+{
+	struct nfs_server *server;
+
+	spin_lock(&clp->cl_lock);
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs4_reset_seqids_locked(server, mark_reclaim);
+	rcu_read_unlock();
+	spin_unlock(&clp->cl_lock);
+}
+
 static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
 {
 	/* Mark all delegations for reclaim */
@@ -1148,26 +1222,42 @@ static void nfs4_reclaim_complete(struct nfs_client *clp,
 		(void)ops->reclaim_complete(clp);
 }
 
-static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
+static void nfs4_clear_reclaim_server_locked(struct nfs_server *server)
 {
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
 	struct nfs4_state *state;
 
-	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
-		return 0;
-
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		spin_lock(&sp->so_lock);
 		list_for_each_entry(state, &sp->so_states, open_states) {
-			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
+			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
+						&state->flags))
 				continue;
-			nfs4_state_mark_reclaim_nograce(clp, state);
+			nfs4_state_mark_reclaim_nograce(server->nfs_client,
+							state);
 		}
 		spin_unlock(&sp->so_lock);
 	}
 
+}
+
+static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+
+	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
+		return 0;
+
+	spin_lock(&clp->cl_lock);
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs4_clear_reclaim_server_locked(server);
+	rcu_read_unlock();
+	spin_unlock(&clp->cl_lock);
 	nfs_delegation_reap_unclaimed(clp);
 	return 1;
 }
@@ -1238,26 +1328,39 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 
 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
 {
+	struct nfs4_state_owner *sp;
+	struct nfs_server *server;
 	struct rb_node *pos;
 	int status = 0;
 
 restart:
 	spin_lock(&clp->cl_lock);
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
-		if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
-			continue;
-		atomic_inc(&sp->so_count);
-		spin_unlock(&clp->cl_lock);
-		status = nfs4_reclaim_open_state(sp, ops);
-		if (status < 0) {
-			set_bit(ops->owner_flag_bit, &sp->so_flags);
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		for (pos = rb_first(&server->state_owners);
+		     pos != NULL;
+		     pos = rb_next(pos)) {
+			sp = rb_entry(pos,
+				struct nfs4_state_owner, so_server_node);
+			if (!test_and_clear_bit(ops->owner_flag_bit,
+							&sp->so_flags))
+				continue;
+			atomic_inc(&sp->so_count);
+			rcu_read_unlock();
+			spin_unlock(&clp->cl_lock);
+
+			status = nfs4_reclaim_open_state(sp, ops);
+			if (status < 0) {
+				set_bit(ops->owner_flag_bit, &sp->so_flags);
+				nfs4_put_state_owner(sp);
+				return nfs4_recovery_handle_error(clp, status);
+			}
+
 			nfs4_put_state_owner(sp);
-			return nfs4_recovery_handle_error(clp, status);
+			goto restart;
 		}
-		nfs4_put_state_owner(sp);
-		goto restart;
 	}
+	rcu_read_unlock();
 	spin_unlock(&clp->cl_lock);
 	return status;
 }
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 452d964..e96ec55 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -47,11 +47,7 @@ struct nfs_client {
 	u64			cl_clientid;	/* constant */
 	unsigned long		cl_state;
 
-	struct rb_root		cl_openowner_id;
-	struct rb_root		cl_lockowner_id;
-
 	struct list_head	cl_delegations;
-	struct rb_root		cl_state_owners;
 	spinlock_t		cl_lock;
 
 	unsigned long		cl_lease_time;
@@ -148,6 +144,11 @@ struct nfs_server {
 						   that are supported on this
 						   filesystem */
 	struct pnfs_layoutdriver_type  *pnfs_curr_ld; /* Active layout driver */
+
+	/* the following fields are protected by nfs_client->cl_lock */
+	struct rb_root		state_owners;
+	struct rb_root		openowner_id;
+	struct rb_root		lockowner_id;
 #endif
 	void (*destroy)(struct nfs_server *);
 



* [PATCH 3/4] NFS: Introduce nfs_detach_delegations()
  2010-12-23 21:54 [PATCH 0/4] Refactor data structures to support NFSv4 migration Chuck Lever
  2010-12-23 21:54 ` [PATCH 1/4] NFS: Allow walking nfs_client.cl_superblocks list outside client.c Chuck Lever
  2010-12-23 21:54 ` [PATCH 2/4] NFS: Move cl_state_owners and related fields to the nfs_server struct Chuck Lever
@ 2010-12-23 21:54 ` Chuck Lever
  2010-12-23 21:54 ` [PATCH 4/4] NFS: Move cl_delegations to the nfs_server struct Chuck Lever
  3 siblings, 0 replies; 11+ messages in thread
From: Chuck Lever @ 2010-12-23 21:54 UTC (permalink / raw)
  To: trond.myklebust; +Cc: linux-nfs

Clean up:  Refactor the code that takes clp->cl_lock and calls
nfs_detach_delegation_locked() into its own function,
nfs_detach_delegation().

While we're changing the call sites, get rid of the second parameter
(the stateid) and the logic in nfs_detach_delegation_locked() that
uses it, since callers always pass NULL for that parameter.
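
The net effect at a typical call site, shown here for illustration
(both forms come from the diff below):

	/* before: callers open-coded the locking */
	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
	spin_unlock(&clp->cl_lock);

	/* after: the wrapper takes cl_lock internally */
	delegation = nfs_detach_delegation(NFS_I(inode), clp);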

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/delegation.c |   41 ++++++++++++++++++++---------------------
 1 files changed, 20 insertions(+), 21 deletions(-)

diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 1fd62fc..521d71b 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -175,9 +175,9 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation
 	return inode;
 }
 
-static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi,
-							   const nfs4_stateid *stateid,
-							   struct nfs_client *clp)
+static struct nfs_delegation *
+nfs_detach_delegation_locked(struct nfs_inode *nfsi,
+			     struct nfs_client *clp)
 {
 	struct nfs_delegation *delegation =
 		rcu_dereference_protected(nfsi->delegation,
@@ -185,22 +185,29 @@ static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfs
 
 	if (delegation == NULL)
 		goto nomatch;
+
 	spin_lock(&delegation->lock);
-	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
-				sizeof(delegation->stateid.data)) != 0)
-		goto nomatch_unlock;
 	list_del_rcu(&delegation->super_list);
 	delegation->inode = NULL;
 	nfsi->delegation_state = 0;
 	rcu_assign_pointer(nfsi->delegation, NULL);
 	spin_unlock(&delegation->lock);
 	return delegation;
-nomatch_unlock:
-	spin_unlock(&delegation->lock);
 nomatch:
 	return NULL;
 }
 
+static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
+						    struct nfs_client *clp)
+{
+	struct nfs_delegation *delegation;
+
+	spin_lock(&clp->cl_lock);
+	delegation = nfs_detach_delegation_locked(nfsi, clp);
+	spin_unlock(&clp->cl_lock);
+	return delegation;
+}
+
 /*
  * Set up a delegation on an inode
  */
@@ -246,7 +253,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 			delegation = NULL;
 			goto out;
 		}
-		freeme = nfs_detach_delegation_locked(nfsi, NULL, clp);
+		freeme = nfs_detach_delegation_locked(nfsi, clp);
 	}
 	list_add_rcu(&delegation->super_list, &clp->cl_delegations);
 	nfsi->delegation_state = delegation->type;
@@ -307,9 +314,7 @@ restart:
 		inode = nfs_delegation_grab_inode(delegation);
 		if (inode == NULL)
 			continue;
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(NFS_I(inode), clp);
 		rcu_read_unlock();
 		if (delegation != NULL) {
 			filemap_flush(inode->i_mapping);
@@ -338,9 +343,7 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode)
 	struct nfs_delegation *delegation;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(nfsi, clp);
 		if (delegation != NULL)
 			nfs_do_return_delegation(inode, delegation, 0);
 	}
@@ -354,9 +357,7 @@ int nfs_inode_return_delegation(struct inode *inode)
 	int err = 0;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(nfsi, clp);
 		if (delegation != NULL) {
 			nfs_wb_all(inode);
 			err = __nfs_inode_return_delegation(inode, delegation, 1);
@@ -530,9 +531,7 @@ restart:
 		inode = nfs_delegation_grab_inode(delegation);
 		if (inode == NULL)
 			continue;
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(NFS_I(inode), clp);
 		rcu_read_unlock();
 		if (delegation != NULL)
 			nfs_free_delegation(delegation);



* [PATCH 4/4] NFS: Move cl_delegations to the nfs_server struct
  2010-12-23 21:54 [PATCH 0/4] Refactor data structures to support NFSv4 migration Chuck Lever
                   ` (2 preceding siblings ...)
  2010-12-23 21:54 ` [PATCH 3/4] NFS: Introduce nfs_detach_delegations() Chuck Lever
@ 2010-12-23 21:54 ` Chuck Lever
  2010-12-23 22:09   ` Trond Myklebust
  3 siblings, 1 reply; 11+ messages in thread
From: Chuck Lever @ 2010-12-23 21:54 UTC (permalink / raw)
  To: trond.myklebust; +Cc: linux-nfs

Delegations are per-inode, not per-nfs_client.  When a server file
system is migrated, delegations on the client must be moved from the
source to the destination nfs_server.  Make it easier to manage a
mount point's delegation list across a migration event by moving the
list to the nfs_server struct.
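
A minimal sketch, not part of the patch, of what a per-client
delegation walk looks like after the move: every nfs_server on the
client's cl_superblocks list carries its own delegations list, and
both lists are walked under RCU.  The function name is hypothetical:

static void example_walk_delegations(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(delegation, &server->delegations,
					super_list) {
			if (!test_bit(NFS_DELEGATION_NEED_RECLAIM,
				      &delegation->flags))
				continue;
			/*
			 * Act on the delegation here.  To sleep, the
			 * real code first pins the inode (see
			 * nfs_delegation_grab_inode()) and drops
			 * rcu_read_lock(), as
			 * nfs_delegation_reap_unclaimed() does below.
			 */
		}
	}
	rcu_read_unlock();
}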

Clean up: I added documenting comments to public functions I changed
in this patch.  For consistency I added comments to all the other
public functions in fs/nfs/delegation.c.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c           |    4 -
 fs/nfs/delegation.c       |  337 +++++++++++++++++++++++++++++++++------------
 fs/nfs/delegation.h       |    1 
 fs/nfs/nfs4renewd.c       |    2 
 include/linux/nfs_fs_sb.h |    2 
 5 files changed, 253 insertions(+), 93 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 05e2ee2..d112887 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -144,7 +144,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
 	clp->cl_proto = cl_init->proto;
 
 #ifdef CONFIG_NFS_V4
-	INIT_LIST_HEAD(&clp->cl_delegations);
 	spin_lock_init(&clp->cl_lock);
 	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
@@ -1025,6 +1024,9 @@ static struct nfs_server *nfs_alloc_server(void)
 	/* Zero out the NFS state stuff */
 	INIT_LIST_HEAD(&server->client_link);
 	INIT_LIST_HEAD(&server->master_link);
+#ifdef CONFIG_NFS_V4
+	INIT_LIST_HEAD(&server->delegations);
+#endif
 
 	atomic_set(&server->active, 0);
 
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 521d71b..364e432 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -40,11 +40,23 @@ static void nfs_free_delegation(struct nfs_delegation *delegation)
 	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
 }
 
+/**
+ * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
+ * @delegation: delegation to process
+ *
+ */
 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
 {
 	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
 }
 
+/**
+ * nfs_have_delegation - check if inode has a delegation
+ * @inode: inode to check
+ * @flags: delegation types to check for
+ *
+ * Returns one if inode has the indicated delegation, otherwise zero.
+ */
 int nfs_have_delegation(struct inode *inode, fmode_t flags)
 {
 	struct nfs_delegation *delegation;
@@ -119,10 +131,15 @@ again:
 	return 0;
 }
 
-/*
- * Set up a delegation on an inode
+/**
+ * nfs_inode_reclaim_delegation - process a delegation reclaim request
+ * @inode: inode to process
+ * @cred: credential to use for request
+ * @res: new delegation state from server
+ *
  */
-void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
+void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+				  struct nfs_openres *res)
 {
 	struct nfs_delegation *delegation;
 	struct rpc_cred *oldcred = NULL;
@@ -177,11 +194,11 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation
 
 static struct nfs_delegation *
 nfs_detach_delegation_locked(struct nfs_inode *nfsi,
-			     struct nfs_client *clp)
+			     struct nfs_server *server)
 {
 	struct nfs_delegation *delegation =
 		rcu_dereference_protected(nfsi->delegation,
-					  lockdep_is_held(&clp->cl_lock));
+				lockdep_is_held(&server->nfs_client->cl_lock));
 
 	if (delegation == NULL)
 		goto nomatch;
@@ -198,22 +215,29 @@ nomatch:
 }
 
 static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
-						    struct nfs_client *clp)
+						    struct nfs_server *server)
 {
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs_delegation *delegation;
 
 	spin_lock(&clp->cl_lock);
-	delegation = nfs_detach_delegation_locked(nfsi, clp);
+	delegation = nfs_detach_delegation_locked(nfsi, server);
 	spin_unlock(&clp->cl_lock);
 	return delegation;
 }
 
-/*
- * Set up a delegation on an inode
+/**
+ * nfs_inode_set_delegation - set up a delegation on an inode
+ * @inode: inode to which delegation applies
+ * @cred: cred to use for subsequent delegation processing
+ * @res: new delegation state from server
+ *
+ * Returns zero on success, or a negative errno value.
  */
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation, *old_delegation;
 	struct nfs_delegation *freeme = NULL;
@@ -234,7 +258,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 
 	spin_lock(&clp->cl_lock);
 	old_delegation = rcu_dereference_protected(nfsi->delegation,
-						   lockdep_is_held(&clp->cl_lock));
+					lockdep_is_held(&clp->cl_lock));
 	if (old_delegation != NULL) {
 		if (memcmp(&delegation->stateid, &old_delegation->stateid,
 					sizeof(old_delegation->stateid)) == 0 &&
@@ -253,9 +277,9 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 			delegation = NULL;
 			goto out;
 		}
-		freeme = nfs_detach_delegation_locked(nfsi, clp);
+		freeme = nfs_detach_delegation_locked(nfsi, server);
 	}
-	list_add_rcu(&delegation->super_list, &clp->cl_delegations);
+	list_add_rcu(&delegation->super_list, &server->delegations);
 	nfsi->delegation_state = delegation->type;
 	rcu_assign_pointer(nfsi->delegation, delegation);
 	delegation = NULL;
@@ -297,67 +321,85 @@ out:
 	return err;
 }
 
-/*
- * Return all delegations that have been marked for return
+/**
+ * nfs_client_return_marked_delegations - return previously marked delegations
+ * @clp: nfs_client to process
+ *
+ * Returns zero on success, or a negative errno value.
  */
 int nfs_client_return_marked_delegations(struct nfs_client *clp)
 {
 	struct nfs_delegation *delegation;
+	struct nfs_server *server;
 	struct inode *inode;
 	int err = 0;
 
 restart:
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
-		if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
-			continue;
-		inode = nfs_delegation_grab_inode(delegation);
-		if (inode == NULL)
-			continue;
-		delegation = nfs_detach_delegation(NFS_I(inode), clp);
-		rcu_read_unlock();
-		if (delegation != NULL) {
-			filemap_flush(inode->i_mapping);
-			err = __nfs_inode_return_delegation(inode, delegation, 0);
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		list_for_each_entry_rcu(delegation, &server->delegations,
+								super_list) {
+			if (!test_and_clear_bit(NFS_DELEGATION_RETURN,
+							&delegation->flags))
+				continue;
+			inode = nfs_delegation_grab_inode(delegation);
+			if (inode == NULL)
+				continue;
+			delegation = nfs_detach_delegation(NFS_I(inode),
+								server);
+			rcu_read_unlock();
+
+			if (delegation != NULL) {
+				filemap_flush(inode->i_mapping);
+				err = __nfs_inode_return_delegation(inode,
+								delegation, 0);
+			}
+			iput(inode);
+			if (!err)
+				goto restart;
+			set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+			return err;
 		}
-		iput(inode);
-		if (!err)
-			goto restart;
-		set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
-		return err;
 	}
 	rcu_read_unlock();
 	return 0;
 }
 
-/*
- * This function returns the delegation without reclaiming opens
- * or protecting against delegation reclaims.
- * It is therefore really only safe to be called from
- * nfs4_clear_inode()
+/**
+ * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
+ * @inode: inode to process
+ *
+ * Does not protect against delegation reclaims, therefore really only safe
+ * to be called from nfs4_clear_inode().
  */
 void nfs_inode_return_delegation_noreclaim(struct inode *inode)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		delegation = nfs_detach_delegation(nfsi, clp);
+		delegation = nfs_detach_delegation(nfsi, server);
 		if (delegation != NULL)
 			nfs_do_return_delegation(inode, delegation, 0);
 	}
 }
 
+/**
+ * nfs_inode_return_delegation - synchronously return a delegation
+ * @inode: inode to process
+ *
+ * Returns zero on success, or a negative errno value.
+ */
 int nfs_inode_return_delegation(struct inode *inode)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation;
 	int err = 0;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		delegation = nfs_detach_delegation(nfsi, clp);
+		delegation = nfs_detach_delegation(nfsi, server);
 		if (delegation != NULL) {
 			nfs_wb_all(inode);
 			err = __nfs_inode_return_delegation(inode, delegation, 1);
@@ -366,46 +408,61 @@ int nfs_inode_return_delegation(struct inode *inode)
 	return err;
 }
 
-static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation)
+static void nfs_mark_return_delegation(struct nfs_delegation *delegation)
 {
+	struct nfs_client *clp = NFS_SERVER(delegation->inode)->nfs_client;
+
 	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
 	set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
 }
 
-/*
- * Return all delegations associated to a super block
+/**
+ * nfs_super_return_all_delegations - return delegations for one superblock
+ * @sb: sb to process
+ *
  */
 void nfs_super_return_all_delegations(struct super_block *sb)
 {
-	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
+	struct nfs_server *server = NFS_SB(sb);
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs_delegation *delegation;
 
 	if (clp == NULL)
 		return;
+
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		spin_lock(&delegation->lock);
-		if (delegation->inode != NULL && delegation->inode->i_sb == sb)
-			set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+		set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
 		spin_unlock(&delegation->lock);
 	}
 	rcu_read_unlock();
+
 	if (nfs_client_return_marked_delegations(clp) != 0)
 		nfs4_schedule_state_manager(clp);
 }
 
-static
-void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags)
+static void nfs_mark_return_all_delegation_types(struct nfs_server *server,
+						 fmode_t flags)
 {
 	struct nfs_delegation *delegation;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
 			continue;
 		if (delegation->type & flags)
-			nfs_mark_return_delegation(clp, delegation);
+			nfs_mark_return_delegation(delegation);
 	}
+}
+
+static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp,
+							fmode_t flags)
+{
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_mark_return_all_delegation_types(server, flags);
 	rcu_read_unlock();
 }
 
@@ -420,19 +477,32 @@ static void nfs_delegation_run_state_manager(struct nfs_client *clp)
 		nfs4_schedule_state_manager(clp);
 }
 
+/**
+ * nfs_expire_all_delegation_types
+ * @clp: client to process
+ * @flags: delegation types to expire
+ *
+ */
 void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
 {
 	nfs_client_mark_return_all_delegation_types(clp, flags);
 	nfs_delegation_run_state_manager(clp);
 }
 
+/**
+ * nfs_expire_all_delegations
+ * @clp: client to process
+ *
+ */
 void nfs_expire_all_delegations(struct nfs_client *clp)
 {
 	nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
 }
 
-/*
- * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
+/**
+ * nfs_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
+ * @clp: client to process
+ *
  */
 void nfs_handle_cb_pathdown(struct nfs_client *clp)
 {
@@ -441,29 +511,43 @@ void nfs_handle_cb_pathdown(struct nfs_client *clp)
 	nfs_client_mark_return_all_delegations(clp);
 }
 
-static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp)
+static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
 {
 	struct nfs_delegation *delegation;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
 			continue;
-		nfs_mark_return_delegation(clp, delegation);
+		nfs_mark_return_delegation(delegation);
 	}
-	rcu_read_unlock();
 }
 
+/**
+ * nfs_expire_unreferenced_delegations - Eliminate unused delegations
+ * @clp: nfs_client to process
+ *
+ */
 void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
 {
-	nfs_client_mark_return_unreferenced_delegations(clp);
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_mark_return_unreferenced_delegations(server);
+	rcu_read_unlock();
+
 	nfs_delegation_run_state_manager(clp);
 }
 
-/*
- * Asynchronous delegation recall!
+/**
+ * nfs_async_inode_return_delegation - asynchronously return a delegation
+ * @inode: inode to process
+ * @stateid: state ID information from CB_RECALL arguments
+ *
+ * Returns zero on success, or a negative errno value.
  */
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
+int nfs_async_inode_return_delegation(struct inode *inode,
+				      const nfs4_stateid *stateid)
 {
 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
 	struct nfs_delegation *delegation;
@@ -475,22 +559,21 @@ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *s
 		rcu_read_unlock();
 		return -ENOENT;
 	}
-
-	nfs_mark_return_delegation(clp, delegation);
+	nfs_mark_return_delegation(delegation);
 	rcu_read_unlock();
+
 	nfs_delegation_run_state_manager(clp);
 	return 0;
 }
 
-/*
- * Retrieve the inode associated with a delegation
- */
-struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
+static struct inode *
+nfs_delegation_find_inode_server(struct nfs_server *server,
+				 const struct nfs_fh *fhandle)
 {
 	struct nfs_delegation *delegation;
 	struct inode *res = NULL;
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		spin_lock(&delegation->lock);
 		if (delegation->inode != NULL &&
 		    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
@@ -500,47 +583,121 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs
 		if (res != NULL)
 			break;
 	}
+	return res;
+}
+
+/**
+ * nfs_delegation_find_inode - retrieve the inode associated with a delegation
+ * @clp: client state handle
+ * @fhandle: filehandle from a delegation recall
+ *
+ * Returns pointer to inode matching "fhandle," or NULL if a matching inode
+ * cannot be found.
+ */
+struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
+					const struct nfs_fh *fhandle)
+{
+	struct nfs_server *server;
+	struct inode *res = NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		res = nfs_delegation_find_inode_server(server, fhandle);
+		if (res != NULL)
+			break;
+	}
 	rcu_read_unlock();
 	return res;
 }
 
-/*
- * Mark all delegations as needing to be reclaimed
+static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
+{
+	struct nfs_delegation *delegation;
+
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list)
+		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+}
+
+/**
+ * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
+ * @clp: nfs_client to process
+ *
  */
 void nfs_delegation_mark_reclaim(struct nfs_client *clp)
 {
-	struct nfs_delegation *delegation;
+	struct nfs_server *server;
+
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
-		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_delegation_mark_reclaim_server(server);
 	rcu_read_unlock();
 }
 
-/*
- * Reap all unclaimed delegations after reboot recovery is done
+/**
+ * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
+ * @clp: nfs_client to process
+ *
  */
 void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
 {
 	struct nfs_delegation *delegation;
+	struct nfs_server *server;
 	struct inode *inode;
+
 restart:
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
-		if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0)
-			continue;
-		inode = nfs_delegation_grab_inode(delegation);
-		if (inode == NULL)
-			continue;
-		delegation = nfs_detach_delegation(NFS_I(inode), clp);
-		rcu_read_unlock();
-		if (delegation != NULL)
-			nfs_free_delegation(delegation);
-		iput(inode);
-		goto restart;
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		list_for_each_entry_rcu(delegation, &server->delegations,
+								super_list) {
+			if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
+						&delegation->flags) == 0)
+				continue;
+			inode = nfs_delegation_grab_inode(delegation);
+			if (inode == NULL)
+				continue;
+			delegation = nfs_detach_delegation(NFS_I(inode),
+								server);
+			rcu_read_unlock();
+
+			if (delegation != NULL)
+				nfs_free_delegation(delegation);
+			iput(inode);
+			goto restart;
+		}
 	}
 	rcu_read_unlock();
 }
 
+/**
+ * nfs_delegations_present - check for existence of delegations
+ * @clp: client state handle
+ *
+ * Returns one if there are any nfs_delegation structures attached
+ * to this nfs_client.
+ */
+int nfs_delegations_present(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+	int ret = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		if (!list_empty(&server->delegations)) {
+			ret = 1;
+			break;
+		}
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * nfs4_copy_delegation_stateid - Copy inode's state ID information
+ * @dst: stateid data structure to fill in
+ * @inode: inode to check
+ *
+ * Returns one and fills in "dst->data" if inode had a delegation,
+ * otherwise zero is returned.
+ */
 int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 2026304..d9322e4 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -44,6 +44,7 @@ void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags);
 void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
 void nfs_handle_cb_pathdown(struct nfs_client *clp);
 int nfs_client_return_marked_delegations(struct nfs_client *clp);
+int nfs_delegations_present(struct nfs_client *clp);
 
 void nfs_delegation_mark_reclaim(struct nfs_client *clp);
 void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index cde5650..402143d 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -80,7 +80,7 @@ nfs4_renew_state(struct work_struct *work)
 		cred = ops->get_state_renewal_cred_locked(clp);
 		spin_unlock(&clp->cl_lock);
 		if (cred == NULL) {
-			if (list_empty(&clp->cl_delegations)) {
+			if (!nfs_delegations_present(clp)) {
 				set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
 				goto out;
 			}
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index e96ec55..2ddf7aa 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -47,7 +47,6 @@ struct nfs_client {
 	u64			cl_clientid;	/* constant */
 	unsigned long		cl_state;
 
-	struct list_head	cl_delegations;
 	spinlock_t		cl_lock;
 
 	unsigned long		cl_lease_time;
@@ -149,6 +148,7 @@ struct nfs_server {
 	struct rb_root		state_owners;
 	struct rb_root		openowner_id;
 	struct rb_root		lockowner_id;
+	struct list_head	delegations;
 #endif
 	void (*destroy)(struct nfs_server *);
 



* Re: [PATCH 2/4] NFS: Move cl_state_owners and related fields to the nfs_server struct
  2010-12-23 21:54 ` [PATCH 2/4] NFS: Move cl_state_owners and related fields to the nfs_server struct Chuck Lever
@ 2010-12-23 22:04   ` Trond Myklebust
  2010-12-23 22:49     ` Chuck Lever
  0 siblings, 1 reply; 11+ messages in thread
From: Trond Myklebust @ 2010-12-23 22:04 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

On Thu, 2010-12-23 at 16:54 -0500, Chuck Lever wrote:
> NFSv4 migration needs to reassociate state owners from the source to
> the destination nfs_server data structures.  To make that easier, move
> the cl_state_owners field to the nfs_server struct.  cl_openowner_id
> and cl_lockowner_id accompany this move, as they are used in
> conjunction with cl_state_owners.
> 
> The cl_lock field in the parent nfs_client continues to protect all
> three of these fields.
> 
> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> ---
> 
>  fs/nfs/nfs4_fs.h          |    2 
>  fs/nfs/nfs4state.c        |  239 ++++++++++++++++++++++++++++++++-------------
>  include/linux/nfs_fs_sb.h |    9 +-
>  3 files changed, 177 insertions(+), 73 deletions(-)
> 
> diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
> index 7a6eecf..fa5c5d3 100644
> --- a/fs/nfs/nfs4_fs.h
> +++ b/fs/nfs/nfs4_fs.h
> @@ -109,7 +109,7 @@ struct nfs_unique_id {
>  struct nfs4_state_owner {
>  	struct nfs_unique_id so_owner_id;
>  	struct nfs_server    *so_server;
> -	struct rb_node	     so_client_node;
> +	struct rb_node	     so_server_node;
>  
>  	struct rpc_cred	     *so_cred;	 /* Associated cred */
>  
> diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
> index f575a31..fbcfe72 100644
> --- a/fs/nfs/nfs4state.c
> +++ b/fs/nfs/nfs4state.c
> @@ -105,14 +105,17 @@ static void nfs4_clear_machine_cred(struct nfs_client *clp)
>  		put_rpccred(cred);
>  }
>  
> -struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
> +static struct rpc_cred *
> +nfs4_get_renew_cred_server_locked(struct nfs_server *server)
>  {
> +	struct rpc_cred *cred = NULL;
>  	struct nfs4_state_owner *sp;
>  	struct rb_node *pos;
> -	struct rpc_cred *cred = NULL;
>  
> -	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
> -		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
> +	for (pos = rb_first(&server->state_owners);
> +	     pos != NULL;
> +	     pos = rb_next(pos)) {
> +		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
>  		if (list_empty(&sp->so_states))
>  			continue;
>  		cred = get_rpccred(sp->so_cred);
> @@ -121,6 +124,28 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
>  	return cred;
>  }
>  
> +/**
> + * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
> + * @clp: client state handle
> + *
> + * Returns an rpc_cred with reference count bumped, or NULL.
> + * Caller must hold clp->cl_lock.
> + */
> +struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
> +{
> +	struct rpc_cred *cred = NULL;
> +	struct nfs_server *server;
> +
> +	rcu_read_lock();
> +	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
> +		cred = nfs4_get_renew_cred_server_locked(server);
> +		if (cred != NULL)
> +			break;
> +	}
> +	rcu_read_unlock();
> +	return cred;
> +}
> +
>  #if defined(CONFIG_NFS_V4_1)
>  
>  static int nfs41_setup_state_renewal(struct nfs_client *clp)
> @@ -210,21 +235,45 @@ struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
>  
>  #endif /* CONFIG_NFS_V4_1 */
>  
> -struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
> +static struct rpc_cred *
> +nfs4_get_setclientid_cred_server_locked(struct nfs_server *server)
>  {
> +	struct rpc_cred *cred = NULL;
>  	struct nfs4_state_owner *sp;
>  	struct rb_node *pos;
> +
> +	pos = rb_first(&server->state_owners);
> +	if (pos != NULL) {
> +		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
> +		cred = get_rpccred(sp->so_cred);
> +	}
> +	return cred;
> +}
> +
> +/**
> + * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
> + * @clp: client state handle
> + *
> + * Returns an rpc_cred with reference count bumped, or NULL.
> + */
> +struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
> +{
> +	struct nfs_server *server;
>  	struct rpc_cred *cred;
>  
>  	spin_lock(&clp->cl_lock);
>  	cred = nfs4_get_machine_cred_locked(clp);
>  	if (cred != NULL)
>  		goto out;
> -	pos = rb_first(&clp->cl_state_owners);
> -	if (pos != NULL) {
> -		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
> -		cred = get_rpccred(sp->so_cred);
> +
> +	rcu_read_lock();

Here's another rcu_read_lock() nested inside a spin locked section.

> +	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
> +		cred = nfs4_get_setclientid_cred_server_locked(server);
> +		if (cred != NULL)
> +			break;
>  	}
> +	rcu_read_unlock();
> +
>  out:
>  	spin_unlock(&clp->cl_lock);
>  	return cred;
> @@ -286,16 +335,15 @@ static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
>  }
>  
>  static struct nfs4_state_owner *
> -nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
> +nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
>  {
> -	struct nfs_client *clp = server->nfs_client;
> -	struct rb_node **p = &clp->cl_state_owners.rb_node,
> +	struct rb_node **p = &server->state_owners.rb_node,
>  		       *parent = NULL;
>  	struct nfs4_state_owner *sp, *res = NULL;
>  
>  	while (*p != NULL) {
>  		parent = *p;
> -		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
> +		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
>  
>  		if (server < sp->so_server) {
>  			p = &parent->rb_left;
> @@ -319,24 +367,17 @@ nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
>  }
>  
>  static struct nfs4_state_owner *
> -nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
> +nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
>  {
> -	struct rb_node **p = &clp->cl_state_owners.rb_node,
> +	struct nfs_server *server = new->so_server;
> +	struct rb_node **p = &server->state_owners.rb_node,
>  		       *parent = NULL;
>  	struct nfs4_state_owner *sp;
>  
>  	while (*p != NULL) {
>  		parent = *p;
> -		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
> +		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
>  
> -		if (new->so_server < sp->so_server) {
> -			p = &parent->rb_left;
> -			continue;
> -		}
> -		if (new->so_server > sp->so_server) {
> -			p = &parent->rb_right;
> -			continue;
> -		}
>  		if (new->so_cred < sp->so_cred)
>  			p = &parent->rb_left;
>  		else if (new->so_cred > sp->so_cred)
> @@ -346,18 +387,20 @@ nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
>  			return sp;
>  		}
>  	}
> -	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
> -	rb_link_node(&new->so_client_node, parent, p);
> -	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
> +	nfs_alloc_unique_id(&server->openowner_id, &new->so_owner_id, 1, 64);
> +	rb_link_node(&new->so_server_node, parent, p);
> +	rb_insert_color(&new->so_server_node, &server->state_owners);
>  	return new;
>  }
>  
>  static void
> -nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
> +nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
>  {
> -	if (!RB_EMPTY_NODE(&sp->so_client_node))
> -		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
> -	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
> +	struct nfs_server *server = sp->so_server;
> +
> +	if (!RB_EMPTY_NODE(&sp->so_server_node))
> +		rb_erase(&sp->so_server_node, &server->state_owners);
> +	nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id);
>  }
>  
>  /*
> @@ -386,23 +429,32 @@ nfs4_alloc_state_owner(void)
>  static void
>  nfs4_drop_state_owner(struct nfs4_state_owner *sp)
>  {
> -	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
> -		struct nfs_client *clp = sp->so_server->nfs_client;
> +	if (!RB_EMPTY_NODE(&sp->so_server_node)) {
> +		struct nfs_server *server = sp->so_server;
> +		struct nfs_client *clp = server->nfs_client;
>  
>  		spin_lock(&clp->cl_lock);
> -		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
> -		RB_CLEAR_NODE(&sp->so_client_node);
> +		rb_erase(&sp->so_server_node, &server->state_owners);
> +		RB_CLEAR_NODE(&sp->so_server_node);
>  		spin_unlock(&clp->cl_lock);
>  	}
>  }
>  
> -struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
> +/**
> + * nfs4_get_state_owner - Look up a state owner given a credential
> + * @server: nfs_server to search
> + * @cred: RPC credential to match
> + *
> + * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
> + */
> +struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
> +					      struct rpc_cred *cred)
>  {
>  	struct nfs_client *clp = server->nfs_client;
>  	struct nfs4_state_owner *sp, *new;
>  
>  	spin_lock(&clp->cl_lock);
> -	sp = nfs4_find_state_owner(server, cred);
> +	sp = nfs4_find_state_owner_locked(server, cred);
>  	spin_unlock(&clp->cl_lock);
>  	if (sp != NULL)
>  		return sp;
> @@ -412,7 +464,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
>  	new->so_server = server;
>  	new->so_cred = cred;
>  	spin_lock(&clp->cl_lock);
> -	sp = nfs4_insert_state_owner(clp, new);
> +	sp = nfs4_insert_state_owner_locked(new);
>  	spin_unlock(&clp->cl_lock);
>  	if (sp == new)
>  		get_rpccred(cred);
> @@ -423,6 +475,11 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
>  	return sp;
>  }
>  
> +/**
> + * nfs4_put_state_owner - Release a nfs4_state_owner
> + * @sp: state owner data to release
> + *
> + */
>  void nfs4_put_state_owner(struct nfs4_state_owner *sp)
>  {
>  	struct nfs_client *clp = sp->so_server->nfs_client;
> @@ -430,7 +487,7 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
>  
>  	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
>  		return;
> -	nfs4_remove_state_owner(clp, sp);
> +	nfs4_remove_state_owner_locked(sp);
>  	spin_unlock(&clp->cl_lock);
>  	rpc_destroy_wait_queue(&sp->so_sequence.wait);
>  	put_rpccred(cred);
> @@ -633,7 +690,8 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_p
>  static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
>  {
>  	struct nfs4_lock_state *lsp;
> -	struct nfs_client *clp = state->owner->so_server->nfs_client;
> +	struct nfs_server *server = state->owner->so_server;
> +	struct nfs_client *clp = server->nfs_client;
>  
>  	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
>  	if (lsp == NULL)
> @@ -657,7 +715,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
>  		return NULL;
>  	}
>  	spin_lock(&clp->cl_lock);
> -	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
> +	nfs_alloc_unique_id(&server->lockowner_id, &lsp->ls_id, 1, 64);
>  	spin_unlock(&clp->cl_lock);
>  	INIT_LIST_HEAD(&lsp->ls_locks);
>  	return lsp;
> @@ -665,10 +723,11 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
>  
>  static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
>  {
> -	struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;
> +	struct nfs_server *server = lsp->ls_state->owner->so_server;
> +	struct nfs_client *clp = server->nfs_client;
>  
>  	spin_lock(&clp->cl_lock);
> -	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
> +	nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id);
>  	spin_unlock(&clp->cl_lock);
>  	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
>  	kfree(lsp);
> @@ -1114,25 +1173,40 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
>  	}
>  }
>  
> -static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
> +static void nfs4_reset_seqids_locked(struct nfs_server *server,
> +	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
>  {
>  	struct nfs4_state_owner *sp;
>  	struct rb_node *pos;
>  	struct nfs4_state *state;
>  
> -	/* Reset all sequence ids to zero */
> -	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
> -		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
> +	for (pos = rb_first(&server->state_owners);
> +	     pos != NULL;
> +	     pos = rb_next(pos)) {
> +		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
>  		sp->so_seqid.flags = 0;
>  		spin_lock(&sp->so_lock);
>  		list_for_each_entry(state, &sp->so_states, open_states) {
> -			if (mark_reclaim(clp, state))
> +			if (mark_reclaim(server->nfs_client, state))
>  				nfs4_clear_open_state(state);
>  		}
>  		spin_unlock(&sp->so_lock);
>  	}
>  }
>  
> +static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
> +	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
> +{
> +	struct nfs_server *server;
> +
> +	spin_lock(&clp->cl_lock);
> +	rcu_read_lock();

Ditto.

> +	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
> +		nfs4_reset_seqids_locked(server, mark_reclaim);
> +	rcu_read_unlock();
> +	spin_unlock(&clp->cl_lock);
> +}
> +
>  static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
>  {
>  	/* Mark all delegations for reclaim */
> @@ -1148,26 +1222,42 @@ static void nfs4_reclaim_complete(struct nfs_client *clp,
>  		(void)ops->reclaim_complete(clp);
>  }
>  
> -static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
> +static void nfs4_clear_reclaim_server_locked(struct nfs_server *server)
>  {
>  	struct nfs4_state_owner *sp;
>  	struct rb_node *pos;
>  	struct nfs4_state *state;
>  
> -	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
> -		return 0;
> -
> -	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
> -		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
> +	for (pos = rb_first(&server->state_owners);
> +	     pos != NULL;
> +	     pos = rb_next(pos)) {
> +		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
>  		spin_lock(&sp->so_lock);
>  		list_for_each_entry(state, &sp->so_states, open_states) {
> -			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
> +			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
> +						&state->flags))
>  				continue;
> -			nfs4_state_mark_reclaim_nograce(clp, state);
> +			nfs4_state_mark_reclaim_nograce(server->nfs_client,
> +							state);
>  		}
>  		spin_unlock(&sp->so_lock);
>  	}
>  
> +}
> +
> +static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
> +{
> +	struct nfs_server *server;
> +
> +	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
> +		return 0;
> +
> +	spin_lock(&clp->cl_lock);
> +	rcu_read_lock();
> +	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
> +		nfs4_clear_reclaim_server_locked(server);
> +	rcu_read_unlock();

Ditto

> +	spin_unlock(&clp->cl_lock);
>  	nfs_delegation_reap_unclaimed(clp);
>  	return 1;
>  }
> @@ -1238,26 +1328,39 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
>  
>  static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
>  {
> +	struct nfs4_state_owner *sp;
> +	struct nfs_server *server;
>  	struct rb_node *pos;
>  	int status = 0;
>  
>  restart:
>  	spin_lock(&clp->cl_lock);
> -	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
> -		struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
> -		if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
> -			continue;
> -		atomic_inc(&sp->so_count);
> -		spin_unlock(&clp->cl_lock);
> -		status = nfs4_reclaim_open_state(sp, ops);
> -		if (status < 0) {
> -			set_bit(ops->owner_flag_bit, &sp->so_flags);
> +	rcu_read_lock();

Ditto.
> +	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
> +		for (pos = rb_first(&server->state_owners);
> +		     pos != NULL;
> +		     pos = rb_next(pos)) {
> +			sp = rb_entry(pos,
> +				struct nfs4_state_owner, so_server_node);
> +			if (!test_and_clear_bit(ops->owner_flag_bit,
> +							&sp->so_flags))
> +				continue;
> +			atomic_inc(&sp->so_count);
> +			rcu_read_unlock();
> +			spin_unlock(&clp->cl_lock);
> +
> +			status = nfs4_reclaim_open_state(sp, ops);
> +			if (status < 0) {
> +				set_bit(ops->owner_flag_bit, &sp->so_flags);
> +				nfs4_put_state_owner(sp);
> +				return nfs4_recovery_handle_error(clp, status);
> +			}
> +
>  			nfs4_put_state_owner(sp);
> -			return nfs4_recovery_handle_error(clp, status);
> +			goto restart;
>  		}
> -		nfs4_put_state_owner(sp);
> -		goto restart;
>  	}
> +	rcu_read_unlock();
>  	spin_unlock(&clp->cl_lock);
>  	return status;
>  }
> diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
> index 452d964..e96ec55 100644
> --- a/include/linux/nfs_fs_sb.h
> +++ b/include/linux/nfs_fs_sb.h
> @@ -47,11 +47,7 @@ struct nfs_client {
>  	u64			cl_clientid;	/* constant */
>  	unsigned long		cl_state;
>  
> -	struct rb_root		cl_openowner_id;
> -	struct rb_root		cl_lockowner_id;
> -
>  	struct list_head	cl_delegations;
> -	struct rb_root		cl_state_owners;
>  	spinlock_t		cl_lock;
>  
>  	unsigned long		cl_lease_time;
> @@ -148,6 +144,11 @@ struct nfs_server {
>  						   that are supported on this
>  						   filesystem */
>  	struct pnfs_layoutdriver_type  *pnfs_curr_ld; /* Active layout driver */
> +
> +	/* the following fields are protected by nfs_client->cl_lock */
> +	struct rb_root		state_owners;
> +	struct rb_root		openowner_id;
> +	struct rb_root		lockowner_id;
>  #endif
>  	void (*destroy)(struct nfs_server *);
>  
> 

-- 
Trond Myklebust
Linux NFS client maintainer

NetApp
Trond.Myklebust@netapp.com
www.netapp.com


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/4] NFS: Move cl_delegations to the nfs_server struct
  2010-12-23 21:54 ` [PATCH 4/4] NFS: Move cl_delegations to the nfs_server struct Chuck Lever
@ 2010-12-23 22:09   ` Trond Myklebust
       [not found]     ` <1293142182.9718.3.camel-rJ7iovZKK19ZJLDQqaL3InhyD016LWXt@public.gmane.org>
  0 siblings, 1 reply; 11+ messages in thread
From: Trond Myklebust @ 2010-12-23 22:09 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

On Thu, 2010-12-23 at 16:54 -0500, Chuck Lever wrote:
> Delegations are per-inode, not per-nfs_client.  When a server file
> system is migrated, delegations on the client must be moved from the
> source to the destination nfs_server.  Make it easier to manage a
> mount point's delegation list across a migration event by moving the
> list to the nfs_server struct.
> 
> Clean up: I added documenting comments to public functions I changed
> in this patch.  For consistency I added comments to all the other
> public functions in fs/nfs/delegation.c.
> 
> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> ---
> 
>  fs/nfs/client.c           |    4 -
>  fs/nfs/delegation.c       |  337 +++++++++++++++++++++++++++++++++------------
>  fs/nfs/delegation.h       |    1 
>  fs/nfs/nfs4renewd.c       |    2 
>  include/linux/nfs_fs_sb.h |    2 
>  5 files changed, 253 insertions(+), 93 deletions(-)
> 
> diff --git a/fs/nfs/client.c b/fs/nfs/client.c
> index 05e2ee2..d112887 100644
> --- a/fs/nfs/client.c
> +++ b/fs/nfs/client.c
> @@ -144,7 +144,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
>  	clp->cl_proto = cl_init->proto;
>  
>  #ifdef CONFIG_NFS_V4
> -	INIT_LIST_HEAD(&clp->cl_delegations);
>  	spin_lock_init(&clp->cl_lock);
>  	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
>  	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
> @@ -1025,6 +1024,9 @@ static struct nfs_server *nfs_alloc_server(void)
>  	/* Zero out the NFS state stuff */
>  	INIT_LIST_HEAD(&server->client_link);
>  	INIT_LIST_HEAD(&server->master_link);
> +#ifdef CONFIG_NFS_V4
> +	INIT_LIST_HEAD(&server->delegations);
> +#endif

As I said earlier, can we please avoid this? Just define the
'delegations' field unconditionally in struct nfs_server, and get rid of
the CONFIG_NFS_V4 above.
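
(For illustration only -- a rough sketch of the shape being asked for,
not the code that was actually merged: 'delegations' would sit outside
the CONFIG_NFS_V4 block in nfs_fs_sb.h, and nfs_alloc_server() would
initialize it without the #ifdef:)

	struct nfs_server {
		...
		struct list_head	delegations;	/* unconditional */
	#ifdef CONFIG_NFS_V4
		/* the following fields are protected by nfs_client->cl_lock */
		struct rb_root		state_owners;
		struct rb_root		openowner_id;
		struct rb_root		lockowner_id;
	#endif
		...
	};

	static struct nfs_server *nfs_alloc_server(void)
	{
		...
		INIT_LIST_HEAD(&server->client_link);
		INIT_LIST_HEAD(&server->master_link);
		INIT_LIST_HEAD(&server->delegations);	/* no #ifdef needed */
		...
	}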

Cheers
  Trond

>  	atomic_set(&server->active, 0);


 
> diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
> index e96ec55..2ddf7aa 100644
> --- a/include/linux/nfs_fs_sb.h
> +++ b/include/linux/nfs_fs_sb.h
> @@ -47,7 +47,6 @@ struct nfs_client {
>  	u64			cl_clientid;	/* constant */
>  	unsigned long		cl_state;
>  
> -	struct list_head	cl_delegations;
>  	spinlock_t		cl_lock;
>  
>  	unsigned long		cl_lease_time;
> @@ -149,6 +148,7 @@ struct nfs_server {
>  	struct rb_root		state_owners;
>  	struct rb_root		openowner_id;
>  	struct rb_root		lockowner_id;
> +	struct list_head	delegations;
>  #endif
>  	void (*destroy)(struct nfs_server *);
>  
> 

-- 
Trond Myklebust
Linux NFS client maintainer

NetApp
Trond.Myklebust@netapp.com
www.netapp.com


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/4] NFS: Move cl_delegations to the nfs_server struct
       [not found]     ` <1293142182.9718.3.camel-rJ7iovZKK19ZJLDQqaL3InhyD016LWXt@public.gmane.org>
@ 2010-12-23 22:14       ` Chuck Lever
  0 siblings, 0 replies; 11+ messages in thread
From: Chuck Lever @ 2010-12-23 22:14 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: linux-nfs


On Dec 23, 2010, at 5:09 PM, Trond Myklebust wrote:

> On Thu, 2010-12-23 at 16:54 -0500, Chuck Lever wrote:
>> Delegations are per-inode, not per-nfs_client.  When a server file
>> system is migrated, delegations on the client must be moved from the
>> source to the destination nfs_server.  Make it easier to manage a
>> mount point's delegation list across a migration event by moving the
>> list to the nfs_server struct.
>> 
>> Clean up: I added documenting comments to public functions I changed
>> in this patch.  For consistency I added comments to all the other
>> public functions in fs/nfs/delegation.c.
>> 
>> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
>> ---
>> 
>> fs/nfs/client.c           |    4 -
>> fs/nfs/delegation.c       |  337 +++++++++++++++++++++++++++++++++------------
>> fs/nfs/delegation.h       |    1 
>> fs/nfs/nfs4renewd.c       |    2 
>> include/linux/nfs_fs_sb.h |    2 
>> 5 files changed, 253 insertions(+), 93 deletions(-)
>> 
>> diff --git a/fs/nfs/client.c b/fs/nfs/client.c
>> index 05e2ee2..d112887 100644
>> --- a/fs/nfs/client.c
>> +++ b/fs/nfs/client.c
>> @@ -144,7 +144,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
>> 	clp->cl_proto = cl_init->proto;
>> 
>> #ifdef CONFIG_NFS_V4
>> -	INIT_LIST_HEAD(&clp->cl_delegations);
>> 	spin_lock_init(&clp->cl_lock);
>> 	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
>> 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
>> @@ -1025,6 +1024,9 @@ static struct nfs_server *nfs_alloc_server(void)
>> 	/* Zero out the NFS state stuff */
>> 	INIT_LIST_HEAD(&server->client_link);
>> 	INIT_LIST_HEAD(&server->master_link);
>> +#ifdef CONFIG_NFS_V4
>> +	INIT_LIST_HEAD(&server->delegations);
>> +#endif
> 
> As I said earlier, can we please avoid this? Just define the
> 'delegations' field unconditionally in struct nfs_server, and get rid of
> the CONFIG_NFS_V4 above.

Sorry, dude.  I forgot that one.

> 
> Cheers
>  Trond
> 
>> 	atomic_set(&server->active, 0);
> 
> 
> 
>> diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
>> index e96ec55..2ddf7aa 100644
>> --- a/include/linux/nfs_fs_sb.h
>> +++ b/include/linux/nfs_fs_sb.h
>> @@ -47,7 +47,6 @@ struct nfs_client {
>> 	u64			cl_clientid;	/* constant */
>> 	unsigned long		cl_state;
>> 
>> -	struct list_head	cl_delegations;
>> 	spinlock_t		cl_lock;
>> 
>> 	unsigned long		cl_lease_time;
>> @@ -149,6 +148,7 @@ struct nfs_server {
>> 	struct rb_root		state_owners;
>> 	struct rb_root		openowner_id;
>> 	struct rb_root		lockowner_id;
>> +	struct list_head	delegations;
>> #endif
>> 	void (*destroy)(struct nfs_server *);
>> 
>> 
> 
> -- 
> Trond Myklebust
> Linux NFS client maintainer
> 
> NetApp
> Trond.Myklebust@netapp.com
> www.netapp.com
> 

-- 
Chuck Lever
chuck[dot]lever[at]oracle[dot]com





^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 2/4] NFS: Move cl_state_owners and related fields to the nfs_server struct
  2010-12-23 22:04   ` Trond Myklebust
@ 2010-12-23 22:49     ` Chuck Lever
  2010-12-23 23:27       ` Trond Myklebust
  0 siblings, 1 reply; 11+ messages in thread
From: Chuck Lever @ 2010-12-23 22:49 UTC (permalink / raw)
  To: Trond Myklebust; +Cc: linux-nfs


On Dec 23, 2010, at 5:04 PM, Trond Myklebust wrote:

> On Thu, 2010-12-23 at 16:54 -0500, Chuck Lever wrote:
>> NFSv4 migration needs to reassociate state owners from the source to
>> the destination nfs_server data structures.  To make that easier, move
>> the cl_state_owners field to the nfs_server struct.  cl_openowner_id
>> and cl_lockowner_id accompany this move, as they are used in
>> conjunction with cl_state_owners.
>> 
>> The cl_lock field in the parent nfs_client continues to protect all
>> three of these fields.
>> 
>> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
>> ---
>> 
>> fs/nfs/nfs4_fs.h          |    2 
>> fs/nfs/nfs4state.c        |  239 ++++++++++++++++++++++++++++++++-------------
>> include/linux/nfs_fs_sb.h |    9 +-
>> 3 files changed, 177 insertions(+), 73 deletions(-)
>> 
>> diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
>> index 7a6eecf..fa5c5d3 100644
>> --- a/fs/nfs/nfs4_fs.h
>> +++ b/fs/nfs/nfs4_fs.h
>> @@ -109,7 +109,7 @@ struct nfs_unique_id {
>> struct nfs4_state_owner {
>> 	struct nfs_unique_id so_owner_id;
>> 	struct nfs_server    *so_server;
>> -	struct rb_node	     so_client_node;
>> +	struct rb_node	     so_server_node;
>> 
>> 	struct rpc_cred	     *so_cred;	 /* Associated cred */
>> 
>> diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
>> index f575a31..fbcfe72 100644
>> --- a/fs/nfs/nfs4state.c
>> +++ b/fs/nfs/nfs4state.c
>> @@ -105,14 +105,17 @@ static void nfs4_clear_machine_cred(struct nfs_client *clp)
>> 		put_rpccred(cred);
>> }
>> 
>> -struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
>> +static struct rpc_cred *
>> +nfs4_get_renew_cred_server_locked(struct nfs_server *server)
>> {
>> +	struct rpc_cred *cred = NULL;
>> 	struct nfs4_state_owner *sp;
>> 	struct rb_node *pos;
>> -	struct rpc_cred *cred = NULL;
>> 
>> -	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
>> -		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
>> +	for (pos = rb_first(&server->state_owners);
>> +	     pos != NULL;
>> +	     pos = rb_next(pos)) {
>> +		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
>> 		if (list_empty(&sp->so_states))
>> 			continue;
>> 		cred = get_rpccred(sp->so_cred);
>> @@ -121,6 +124,28 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
>> 	return cred;
>> }
>> 
>> +/**
>> + * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
>> + * @clp: client state handle
>> + *
>> + * Returns an rpc_cred with reference count bumped, or NULL.
>> + * Caller must hold clp->cl_lock.

Here's a case where clp->cl_lock will always be held before we invoke rcu_read_lock().  This is why I tried to take cl_lock first, then invoke rcu_read_lock().  I guess that's not a valid concern.

>> + */
>> +struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
>> +{
>> +	struct rpc_cred *cred = NULL;
>> +	struct nfs_server *server;
>> +
>> +	rcu_read_lock();
>> +	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
>> +		cred = nfs4_get_renew_cred_server_locked(server);
>> +		if (cred != NULL)
>> +			break;
>> +	}
>> +	rcu_read_unlock();
>> +	return cred;
>> +}
>> +
>> #if defined(CONFIG_NFS_V4_1)
>> 
>> static int nfs41_setup_state_renewal(struct nfs_client *clp)
>> @@ -210,21 +235,45 @@ struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
>> 
>> #endif /* CONFIG_NFS_V4_1 */
>> 
>> -struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
>> +static struct rpc_cred *
>> +nfs4_get_setclientid_cred_server_locked(struct nfs_server *server)
>> {
>> +	struct rpc_cred *cred = NULL;
>> 	struct nfs4_state_owner *sp;
>> 	struct rb_node *pos;
>> +
>> +	pos = rb_first(&server->state_owners);
>> +	if (pos != NULL) {
>> +		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
>> +		cred = get_rpccred(sp->so_cred);
>> +	}
>> +	return cred;
>> +}
>> +
>> +/**
>> + * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
>> + * @clp: client state handle
>> + *
>> + * Returns an rpc_cred with reference count bumped, or NULL.
>> + */
>> +struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
>> +{
>> +	struct nfs_server *server;
>> 	struct rpc_cred *cred;
>> 
>> 	spin_lock(&clp->cl_lock);
>> 	cred = nfs4_get_machine_cred_locked(clp);
>> 	if (cred != NULL)
>> 		goto out;
>> -	pos = rb_first(&clp->cl_state_owners);
>> -	if (pos != NULL) {
>> -		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
>> -		cred = get_rpccred(sp->so_cred);
>> +
>> +	rcu_read_lock();
> 
> Here's another rcu_read_lock() nested inside a spin locked section.

In this case, we're already holding cl_lock in order to call nfs4_get_machine_cred_locked().  Is it worth the extra logic to release the lock and then grab it repeatedly in the loop?
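
(For concreteness, the "extra logic" in question would look something
like the following -- hypothetical, not part of this series: drop
cl_lock after the machine cred check and re-take it around each
per-server lookup inside the RCU walk.)

	spin_lock(&clp->cl_lock);
	cred = nfs4_get_machine_cred_locked(clp);
	spin_unlock(&clp->cl_lock);
	if (cred != NULL)
		return cred;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		spin_lock(&clp->cl_lock);
		cred = nfs4_get_setclientid_cred_server_locked(server);
		spin_unlock(&clp->cl_lock);
		if (cred != NULL)
			break;
	}
	rcu_read_unlock();
	return cred;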

> 
>> +	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
>> +		cred = nfs4_get_setclientid_cred_server_locked(server);
>> +		if (cred != NULL)
>> +			break;
>> 	}
>> +	rcu_read_unlock();
>> +
>> out:
>> 	spin_unlock(&clp->cl_lock);
>> 	return cred;
>> @@ -286,16 +335,15 @@ static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
>> }
>> 
>> static struct nfs4_state_owner *
>> -nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
>> +nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
>> {
>> -	struct nfs_client *clp = server->nfs_client;
>> -	struct rb_node **p = &clp->cl_state_owners.rb_node,
>> +	struct rb_node **p = &server->state_owners.rb_node,
>> 		       *parent = NULL;
>> 	struct nfs4_state_owner *sp, *res = NULL;
>> 
>> 	while (*p != NULL) {
>> 		parent = *p;
>> -		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
>> +		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
>> 
>> 		if (server < sp->so_server) {
>> 			p = &parent->rb_left;
>> @@ -319,24 +367,17 @@ nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
>> }
>> 
>> static struct nfs4_state_owner *
>> -nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
>> +nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
>> {
>> -	struct rb_node **p = &clp->cl_state_owners.rb_node,
>> +	struct nfs_server *server = new->so_server;
>> +	struct rb_node **p = &server->state_owners.rb_node,
>> 		       *parent = NULL;
>> 	struct nfs4_state_owner *sp;
>> 
>> 	while (*p != NULL) {
>> 		parent = *p;
>> -		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
>> +		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
>> 
>> -		if (new->so_server < sp->so_server) {
>> -			p = &parent->rb_left;
>> -			continue;
>> -		}
>> -		if (new->so_server > sp->so_server) {
>> -			p = &parent->rb_right;
>> -			continue;
>> -		}
>> 		if (new->so_cred < sp->so_cred)
>> 			p = &parent->rb_left;
>> 		else if (new->so_cred > sp->so_cred)
>> @@ -346,18 +387,20 @@ nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
>> 			return sp;
>> 		}
>> 	}
>> -	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
>> -	rb_link_node(&new->so_client_node, parent, p);
>> -	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
>> +	nfs_alloc_unique_id(&server->openowner_id, &new->so_owner_id, 1, 64);
>> +	rb_link_node(&new->so_server_node, parent, p);
>> +	rb_insert_color(&new->so_server_node, &server->state_owners);
>> 	return new;
>> }
>> 
>> static void
>> -nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
>> +nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
>> {
>> -	if (!RB_EMPTY_NODE(&sp->so_client_node))
>> -		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
>> -	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
>> +	struct nfs_server *server = sp->so_server;
>> +
>> +	if (!RB_EMPTY_NODE(&sp->so_server_node))
>> +		rb_erase(&sp->so_server_node, &server->state_owners);
>> +	nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id);
>> }
>> 
>> /*
>> @@ -386,23 +429,32 @@ nfs4_alloc_state_owner(void)
>> static void
>> nfs4_drop_state_owner(struct nfs4_state_owner *sp)
>> {
>> -	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
>> -		struct nfs_client *clp = sp->so_server->nfs_client;
>> +	if (!RB_EMPTY_NODE(&sp->so_server_node)) {
>> +		struct nfs_server *server = sp->so_server;
>> +		struct nfs_client *clp = server->nfs_client;
>> 
>> 		spin_lock(&clp->cl_lock);
>> -		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
>> -		RB_CLEAR_NODE(&sp->so_client_node);
>> +		rb_erase(&sp->so_server_node, &server->state_owners);
>> +		RB_CLEAR_NODE(&sp->so_server_node);
>> 		spin_unlock(&clp->cl_lock);
>> 	}
>> }
>> 
>> -struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
>> +/**
>> + * nfs4_get_state_owner - Look up a state owner given a credential
>> + * @server: nfs_server to search
>> + * @cred: RPC credential to match
>> + *
>> + * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
>> + */
>> +struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
>> +					      struct rpc_cred *cred)
>> {
>> 	struct nfs_client *clp = server->nfs_client;
>> 	struct nfs4_state_owner *sp, *new;
>> 
>> 	spin_lock(&clp->cl_lock);
>> -	sp = nfs4_find_state_owner(server, cred);
>> +	sp = nfs4_find_state_owner_locked(server, cred);
>> 	spin_unlock(&clp->cl_lock);
>> 	if (sp != NULL)
>> 		return sp;
>> @@ -412,7 +464,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
>> 	new->so_server = server;
>> 	new->so_cred = cred;
>> 	spin_lock(&clp->cl_lock);
>> -	sp = nfs4_insert_state_owner(clp, new);
>> +	sp = nfs4_insert_state_owner_locked(new);
>> 	spin_unlock(&clp->cl_lock);
>> 	if (sp == new)
>> 		get_rpccred(cred);
>> @@ -423,6 +475,11 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
>> 	return sp;
>> }
>> 
>> +/**
>> + * nfs4_put_state_owner - Release a nfs4_state_owner
>> + * @sp: state owner data to release
>> + *
>> + */
>> void nfs4_put_state_owner(struct nfs4_state_owner *sp)
>> {
>> 	struct nfs_client *clp = sp->so_server->nfs_client;
>> @@ -430,7 +487,7 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
>> 
>> 	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
>> 		return;
>> -	nfs4_remove_state_owner(clp, sp);
>> +	nfs4_remove_state_owner_locked(sp);
>> 	spin_unlock(&clp->cl_lock);
>> 	rpc_destroy_wait_queue(&sp->so_sequence.wait);
>> 	put_rpccred(cred);
>> @@ -633,7 +690,8 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_p
>> static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
>> {
>> 	struct nfs4_lock_state *lsp;
>> -	struct nfs_client *clp = state->owner->so_server->nfs_client;
>> +	struct nfs_server *server = state->owner->so_server;
>> +	struct nfs_client *clp = server->nfs_client;
>> 
>> 	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
>> 	if (lsp == NULL)
>> @@ -657,7 +715,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
>> 		return NULL;
>> 	}
>> 	spin_lock(&clp->cl_lock);
>> -	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
>> +	nfs_alloc_unique_id(&server->lockowner_id, &lsp->ls_id, 1, 64);
>> 	spin_unlock(&clp->cl_lock);
>> 	INIT_LIST_HEAD(&lsp->ls_locks);
>> 	return lsp;
>> @@ -665,10 +723,11 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
>> 
>> static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
>> {
>> -	struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;
>> +	struct nfs_server *server = lsp->ls_state->owner->so_server;
>> +	struct nfs_client *clp = server->nfs_client;
>> 
>> 	spin_lock(&clp->cl_lock);
>> -	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
>> +	nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id);
>> 	spin_unlock(&clp->cl_lock);
>> 	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
>> 	kfree(lsp);
>> @@ -1114,25 +1173,40 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
>> 	}
>> }
>> 
>> -static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
>> +static void nfs4_reset_seqids_locked(struct nfs_server *server,
>> +	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
>> {
>> 	struct nfs4_state_owner *sp;
>> 	struct rb_node *pos;
>> 	struct nfs4_state *state;
>> 
>> -	/* Reset all sequence ids to zero */
>> -	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
>> -		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
>> +	for (pos = rb_first(&server->state_owners);
>> +	     pos != NULL;
>> +	     pos = rb_next(pos)) {
>> +		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
>> 		sp->so_seqid.flags = 0;
>> 		spin_lock(&sp->so_lock);
>> 		list_for_each_entry(state, &sp->so_states, open_states) {
>> -			if (mark_reclaim(clp, state))
>> +			if (mark_reclaim(server->nfs_client, state))
>> 				nfs4_clear_open_state(state);
>> 		}
>> 		spin_unlock(&sp->so_lock);
>> 	}
>> }
>> 
>> +static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
>> +	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
>> +{
>> +	struct nfs_server *server;
>> +
>> +	spin_lock(&clp->cl_lock);
>> +	rcu_read_lock();
> 
> Ditto.

One begins to wonder if we should have a per-nfs_server lock for the state_owners fields, instead of re-using cl_lock.  Such a lock would probably have to protect sp->so_cred as well.
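
(Sketch of that idea, purely hypothetical and with a made-up field name
-- nothing like this is in the series: nfs_server would grow its own
spinlock, and the walkers above would take it per server inside the RCU
list walk instead of holding cl_lock across the whole loop.)

	struct nfs_server {
		...
		/* hypothetical: protects the three trees below and
		 * nfs4_state_owner.so_cred, replacing nfs_client->cl_lock */
		spinlock_t		state_owners_lock;
		struct rb_root		state_owners;
		struct rb_root		openowner_id;
		struct rb_root		lockowner_id;
		...
	};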

> 
>> +	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
>> +		nfs4_reset_seqids_locked(server, mark_reclaim);
>> +	rcu_read_unlock();
>> +	spin_unlock(&clp->cl_lock);
>> +}
>> +
>> static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
>> {
>> 	/* Mark all delegations for reclaim */
>> @@ -1148,26 +1222,42 @@ static void nfs4_reclaim_complete(struct nfs_client *clp,
>> 		(void)ops->reclaim_complete(clp);
>> }
>> 
>> -static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
>> +static void nfs4_clear_reclaim_server_locked(struct nfs_server *server)
>> {
>> 	struct nfs4_state_owner *sp;
>> 	struct rb_node *pos;
>> 	struct nfs4_state *state;
>> 
>> -	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
>> -		return 0;
>> -
>> -	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
>> -		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
>> +	for (pos = rb_first(&server->state_owners);
>> +	     pos != NULL;
>> +	     pos = rb_next(pos)) {
>> +		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
>> 		spin_lock(&sp->so_lock);
>> 		list_for_each_entry(state, &sp->so_states, open_states) {
>> -			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
>> +			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
>> +						&state->flags))
>> 				continue;
>> -			nfs4_state_mark_reclaim_nograce(clp, state);
>> +			nfs4_state_mark_reclaim_nograce(server->nfs_client,
>> +							state);
>> 		}
>> 		spin_unlock(&sp->so_lock);
>> 	}
>> 
>> +}
>> +
>> +static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
>> +{
>> +	struct nfs_server *server;
>> +
>> +	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
>> +		return 0;
>> +
>> +	spin_lock(&clp->cl_lock);
>> +	rcu_read_lock();
>> +	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
>> +		nfs4_clear_reclaim_server_locked(server);
>> +	rcu_read_unlock();
> 
> Ditto
> 
>> +	spin_unlock(&clp->cl_lock);
>> 	nfs_delegation_reap_unclaimed(clp);
>> 	return 1;
>> }
>> @@ -1238,26 +1328,39 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
>> 
>> static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
>> {
>> +	struct nfs4_state_owner *sp;
>> +	struct nfs_server *server;
>> 	struct rb_node *pos;
>> 	int status = 0;
>> 
>> restart:
>> 	spin_lock(&clp->cl_lock);
>> -	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
>> -		struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
>> -		if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
>> -			continue;
>> -		atomic_inc(&sp->so_count);
>> -		spin_unlock(&clp->cl_lock);
>> -		status = nfs4_reclaim_open_state(sp, ops);
>> -		if (status < 0) {
>> -			set_bit(ops->owner_flag_bit, &sp->so_flags);
>> +	rcu_read_lock();
> 
> Ditto.
>> +	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
>> +		for (pos = rb_first(&server->state_owners);
>> +		     pos != NULL;
>> +		     pos = rb_next(pos)) {
>> +			sp = rb_entry(pos,
>> +				struct nfs4_state_owner, so_server_node);
>> +			if (!test_and_clear_bit(ops->owner_flag_bit,
>> +							&sp->so_flags))
>> +				continue;
>> +			atomic_inc(&sp->so_count);
>> +			rcu_read_unlock();
>> +			spin_unlock(&clp->cl_lock);
>> +
>> +			status = nfs4_reclaim_open_state(sp, ops);
>> +			if (status < 0) {
>> +				set_bit(ops->owner_flag_bit, &sp->so_flags);
>> +				nfs4_put_state_owner(sp);
>> +				return nfs4_recovery_handle_error(clp, status);
>> +			}
>> +
>> 			nfs4_put_state_owner(sp);
>> -			return nfs4_recovery_handle_error(clp, status);
>> +			goto restart;
>> 		}
>> -		nfs4_put_state_owner(sp);
>> -		goto restart;
>> 	}
>> +	rcu_read_unlock();
>> 	spin_unlock(&clp->cl_lock);
>> 	return status;
>> }
>> diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
>> index 452d964..e96ec55 100644
>> --- a/include/linux/nfs_fs_sb.h
>> +++ b/include/linux/nfs_fs_sb.h
>> @@ -47,11 +47,7 @@ struct nfs_client {
>> 	u64			cl_clientid;	/* constant */
>> 	unsigned long		cl_state;
>> 
>> -	struct rb_root		cl_openowner_id;
>> -	struct rb_root		cl_lockowner_id;
>> -
>> 	struct list_head	cl_delegations;
>> -	struct rb_root		cl_state_owners;
>> 	spinlock_t		cl_lock;
>> 
>> 	unsigned long		cl_lease_time;
>> @@ -148,6 +144,11 @@ struct nfs_server {
>> 						   that are supported on this
>> 						   filesystem */
>> 	struct pnfs_layoutdriver_type  *pnfs_curr_ld; /* Active layout driver */
>> +
>> +	/* the following fields are protected by nfs_client->cl_lock */
>> +	struct rb_root		state_owners;
>> +	struct rb_root		openowner_id;
>> +	struct rb_root		lockowner_id;
>> #endif
>> 	void (*destroy)(struct nfs_server *);
>> 
>> 
> 
> -- 
> Trond Myklebust
> Linux NFS client maintainer
> 
> NetApp
> Trond.Myklebust@netapp.com
> www.netapp.com
> 

-- 
Chuck Lever
chuck[dot]lever[at]oracle[dot]com





^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 2/4] NFS: Move cl_state_owners and related fields to the nfs_server struct
  2010-12-23 22:49     ` Chuck Lever
@ 2010-12-23 23:27       ` Trond Myklebust
  0 siblings, 0 replies; 11+ messages in thread
From: Trond Myklebust @ 2010-12-23 23:27 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

On Thu, 2010-12-23 at 17:49 -0500, Chuck Lever wrote:
> On Dec 23, 2010, at 5:04 PM, Trond Myklebust wrote:
> 
> > On Thu, 2010-12-23 at 16:54 -0500, Chuck Lever wrote:
> >> NFSv4 migration needs to reassociate state owners from the source to
> >> the destination nfs_server data structures.  To make that easier, move
> >> the cl_state_owners field to the nfs_server struct.  cl_openowner_id
> >> and cl_lockowner_id accompany this move, as they are used in
> >> conjunction with cl_state_owners.
> >> 
> >> The cl_lock field in the parent nfs_client continues to protect all
> >> three of these fields.
> >> 
> >> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> >> ---
> >> 
> >> fs/nfs/nfs4_fs.h          |    2 
> >> fs/nfs/nfs4state.c        |  239 ++++++++++++++++++++++++++++++++-------------
> >> include/linux/nfs_fs_sb.h |    9 +-
> >> 3 files changed, 177 insertions(+), 73 deletions(-)
> >> 
> >> diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
> >> index 7a6eecf..fa5c5d3 100644
> >> --- a/fs/nfs/nfs4_fs.h
> >> +++ b/fs/nfs/nfs4_fs.h
> >> @@ -109,7 +109,7 @@ struct nfs_unique_id {
> >> struct nfs4_state_owner {
> >> 	struct nfs_unique_id so_owner_id;
> >> 	struct nfs_server    *so_server;
> >> -	struct rb_node	     so_client_node;
> >> +	struct rb_node	     so_server_node;
> >> 
> >> 	struct rpc_cred	     *so_cred;	 /* Associated cred */
> >> 
> >> diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
> >> index f575a31..fbcfe72 100644
> >> --- a/fs/nfs/nfs4state.c
> >> +++ b/fs/nfs/nfs4state.c
> >> @@ -105,14 +105,17 @@ static void nfs4_clear_machine_cred(struct nfs_client *clp)
> >> 		put_rpccred(cred);
> >> }
> >> 
> >> -struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
> >> +static struct rpc_cred *
> >> +nfs4_get_renew_cred_server_locked(struct nfs_server *server)
> >> {
> >> +	struct rpc_cred *cred = NULL;
> >> 	struct nfs4_state_owner *sp;
> >> 	struct rb_node *pos;
> >> -	struct rpc_cred *cred = NULL;
> >> 
> >> -	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
> >> -		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
> >> +	for (pos = rb_first(&server->state_owners);
> >> +	     pos != NULL;
> >> +	     pos = rb_next(pos)) {
> >> +		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
> >> 		if (list_empty(&sp->so_states))
> >> 			continue;
> >> 		cred = get_rpccred(sp->so_cred);
> >> @@ -121,6 +124,28 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
> >> 	return cred;
> >> }
> >> 
> >> +/**
> >> + * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
> >> + * @clp: client state handle
> >> + *
> >> + * Returns an rpc_cred with reference count bumped, or NULL.
> >> + * Caller must hold clp->cl_lock.
> 
> Here's a case where clp->cl_lock will always be held before we invoke rcu_read_lock().  This is why I tried to take cl_lock first, then invoke rcu_read_lock().  I guess that's not a valid concern.
> 
> >> + */
> >> +struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
> >> +{
> >> +	struct rpc_cred *cred = NULL;
> >> +	struct nfs_server *server;
> >> +
> >> +	rcu_read_lock();
> >> +	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
> >> +		cred = nfs4_get_renew_cred_server_locked(server);
> >> +		if (cred != NULL)
> >> +			break;
> >> +	}
> >> +	rcu_read_unlock();
> >> +	return cred;
> >> +}
> >> +
> >> #if defined(CONFIG_NFS_V4_1)
> >> 
> >> static int nfs41_setup_state_renewal(struct nfs_client *clp)
> >> @@ -210,21 +235,45 @@ struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
> >> 
> >> #endif /* CONFIG_NFS_V4_1 */
> >> 
> >> -struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
> >> +static struct rpc_cred *
> >> +nfs4_get_setclientid_cred_server_locked(struct nfs_server *server)
> >> {
> >> +	struct rpc_cred *cred = NULL;
> >> 	struct nfs4_state_owner *sp;
> >> 	struct rb_node *pos;
> >> +
> >> +	pos = rb_first(&server->state_owners);
> >> +	if (pos != NULL) {
> >> +		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
> >> +		cred = get_rpccred(sp->so_cred);
> >> +	}
> >> +	return cred;
> >> +}
> >> +
> >> +/**
> >> + * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
> >> + * @clp: client state handle
> >> + *
> >> + * Returns an rpc_cred with reference count bumped, or NULL.
> >> + */
> >> +struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
> >> +{
> >> +	struct nfs_server *server;
> >> 	struct rpc_cred *cred;
> >> 
> >> 	spin_lock(&clp->cl_lock);
> >> 	cred = nfs4_get_machine_cred_locked(clp);
> >> 	if (cred != NULL)
> >> 		goto out;
> >> -	pos = rb_first(&clp->cl_state_owners);
> >> -	if (pos != NULL) {
> >> -		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
> >> -		cred = get_rpccred(sp->so_cred);
> >> +
> >> +	rcu_read_lock();
> > 
> > Here's another rcu_read_lock() nested inside a spin locked section.
> 
> In this case, we're already holding cl_lock in order to call nfs4_get_machine_cred_locked().  Is it worth the extra logic to release the lock and then grab it repeatedly in the loop?

I'd prefer to just see the rcu_read_lock() moved outside the spin lock
in situations like this. Doing so shouldn't make a difference to the
performance, but it looks neater.

There are a couple of comments in code elsewhere that say that you
shouldn't rely on rcu_read_lock() being the same as preempt_disable(),
so I suppose we shouldn't get rid of it altogether.
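
For nfs4_get_setclientid_cred() that would be roughly (sketch only):

	rcu_read_lock();
	spin_lock(&clp->cl_lock);
	cred = nfs4_get_machine_cred_locked(clp);
	if (cred != NULL)
		goto out;
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		cred = nfs4_get_setclientid_cred_server_locked(server);
		if (cred != NULL)
			break;
	}
out:
	spin_unlock(&clp->cl_lock);
	rcu_read_unlock();
	return cred;

i.e. the RCU read section brackets the cl_lock critical section instead
of being nested inside it.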

Cheers
  Trond
-- 
Trond Myklebust
Linux NFS client maintainer

NetApp
Trond.Myklebust@netapp.com
www.netapp.com


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH 3/4] NFS: Introduce nfs_detach_delegations()
  2010-12-24  1:32 [PATCH 0/4] Refactor data structures to support NFSv4 migration (2) Chuck Lever
@ 2010-12-24  1:32 ` Chuck Lever
  0 siblings, 0 replies; 11+ messages in thread
From: Chuck Lever @ 2010-12-24  1:32 UTC (permalink / raw)
  To: trond.myklebust; +Cc: linux-nfs

Clean up:  Refactor code that takes clp->cl_lock and calls
nfs_detach_delegation_locked() into its own function.

While we're changing the call sites, get rid of the second parameter
(the stateid) and the logic in nfs_detach_delegation_locked() that
uses it, since callers always pass NULL for that parameter.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/delegation.c |   41 ++++++++++++++++++++---------------------
 1 files changed, 20 insertions(+), 21 deletions(-)

diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 1fd62fc..521d71b 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -175,9 +175,9 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation
 	return inode;
 }
 
-static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi,
-							   const nfs4_stateid *stateid,
-							   struct nfs_client *clp)
+static struct nfs_delegation *
+nfs_detach_delegation_locked(struct nfs_inode *nfsi,
+			     struct nfs_client *clp)
 {
 	struct nfs_delegation *delegation =
 		rcu_dereference_protected(nfsi->delegation,
@@ -185,22 +185,29 @@ static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfs
 
 	if (delegation == NULL)
 		goto nomatch;
+
 	spin_lock(&delegation->lock);
-	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
-				sizeof(delegation->stateid.data)) != 0)
-		goto nomatch_unlock;
 	list_del_rcu(&delegation->super_list);
 	delegation->inode = NULL;
 	nfsi->delegation_state = 0;
 	rcu_assign_pointer(nfsi->delegation, NULL);
 	spin_unlock(&delegation->lock);
 	return delegation;
-nomatch_unlock:
-	spin_unlock(&delegation->lock);
 nomatch:
 	return NULL;
 }
 
+static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
+						    struct nfs_client *clp)
+{
+	struct nfs_delegation *delegation;
+
+	spin_lock(&clp->cl_lock);
+	delegation = nfs_detach_delegation_locked(nfsi, clp);
+	spin_unlock(&clp->cl_lock);
+	return delegation;
+}
+
 /*
  * Set up a delegation on an inode
  */
@@ -246,7 +253,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 			delegation = NULL;
 			goto out;
 		}
-		freeme = nfs_detach_delegation_locked(nfsi, NULL, clp);
+		freeme = nfs_detach_delegation_locked(nfsi, clp);
 	}
 	list_add_rcu(&delegation->super_list, &clp->cl_delegations);
 	nfsi->delegation_state = delegation->type;
@@ -307,9 +314,7 @@ restart:
 		inode = nfs_delegation_grab_inode(delegation);
 		if (inode == NULL)
 			continue;
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(NFS_I(inode), clp);
 		rcu_read_unlock();
 		if (delegation != NULL) {
 			filemap_flush(inode->i_mapping);
@@ -338,9 +343,7 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode)
 	struct nfs_delegation *delegation;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(nfsi, clp);
 		if (delegation != NULL)
 			nfs_do_return_delegation(inode, delegation, 0);
 	}
@@ -354,9 +357,7 @@ int nfs_inode_return_delegation(struct inode *inode)
 	int err = 0;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(nfsi, clp);
 		if (delegation != NULL) {
 			nfs_wb_all(inode);
 			err = __nfs_inode_return_delegation(inode, delegation, 1);
@@ -530,9 +531,7 @@ restart:
 		inode = nfs_delegation_grab_inode(delegation);
 		if (inode == NULL)
 			continue;
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(NFS_I(inode), clp);
 		rcu_read_unlock();
 		if (delegation != NULL)
 			nfs_free_delegation(delegation);


^ permalink raw reply related	[flat|nested] 11+ messages in thread
