All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition
@ 2012-04-23 20:53 Chuck Lever
  2012-04-23 20:53 ` [PATCH 02/20] NFS: Use proper naming conventions for NFSv4.1 server scope fields Chuck Lever
                   ` (18 more replies)
  0 siblings, 19 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:53 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 include/linux/nfs_fs_sb.h |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7073fc7..5498e9d 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -79,7 +79,7 @@ struct nfs_client {
 	u32			cl_seqid;
 	/* The flags used for obtaining the clientid during EXCHANGE_ID */
 	u32			cl_exchange_flags;
-	struct nfs4_session	*cl_session; 	/* sharred session */
+	struct nfs4_session	*cl_session;	/* shared session */
 #endif /* CONFIG_NFS_V4 */
 
 #ifdef CONFIG_NFS_FSCACHE


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 02/20] NFS: Use proper naming conventions for NFSv4.1 server scope fields
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
@ 2012-04-23 20:53 ` Chuck Lever
  2012-04-23 20:53 ` [PATCH 03/20] NFS: Use proper naming conventions for nfs_client.impl_id field Chuck Lever
                   ` (17 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:53 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Clean up:  When naming fields and data types, follow established
conventions to facilitate accurate grep/cscope searches.

Additionally, for consistency, move the scope field into the NFSv4-
specific part of the nfs_client, and free that memory in the logic
that shuts down NFSv4 nfs_clients.

Introduced by commit 99fe60d0 "nfs41: exchange_id operation", April
1 2009.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c           |    2 +-
 fs/nfs/nfs4_fs.h          |    2 +-
 fs/nfs/nfs4proc.c         |   18 ++++++++++--------
 include/linux/nfs_fs_sb.h |    4 ++--
 include/linux/nfs_xdr.h   |    4 ++--
 5 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index da7b5e4..60e1525 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -235,6 +235,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
 		nfs_idmap_delete(clp);
 
 	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
+	kfree(clp->cl_serverscope);
 }
 
 /* idr_remove_all is not needed as all id's are removed by nfs_put_client */
@@ -303,7 +304,6 @@ static void nfs_free_client(struct nfs_client *clp)
 
 	put_net(clp->net);
 	kfree(clp->cl_hostname);
-	kfree(clp->server_scope);
 	kfree(clp->impl_id);
 	kfree(clp);
 
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 97ecc86..c294c41 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -327,7 +327,7 @@ extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs
 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
 extern void nfs41_handle_server_scope(struct nfs_client *,
-				      struct server_scope **);
+				      struct nfs41_server_scope **);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
 extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f82bde0..669b181 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4987,7 +4987,8 @@ out_inval:
 }
 
 static bool
-nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
+nfs41_same_server_scope(struct nfs41_server_scope *a,
+			struct nfs41_server_scope *b)
 {
 	if (a->server_scope_sz == b->server_scope_sz &&
 	    memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
@@ -5035,7 +5036,8 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 				init_utsname()->domainname,
 				clp->cl_rpcclient->cl_auth->au_flavor);
 
-	res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
+	res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
+					GFP_KERNEL);
 	if (unlikely(!res.server_scope)) {
 		status = -ENOMEM;
 		goto out;
@@ -5059,18 +5061,18 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 		kfree(res.impl_id);
 
 	if (!status) {
-		if (clp->server_scope &&
-		    !nfs41_same_server_scope(clp->server_scope,
+		if (clp->cl_serverscope &&
+		    !nfs41_same_server_scope(clp->cl_serverscope,
 					     res.server_scope)) {
 			dprintk("%s: server_scope mismatch detected\n",
 				__func__);
 			set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
-			kfree(clp->server_scope);
-			clp->server_scope = NULL;
+			kfree(clp->cl_serverscope);
+			clp->cl_serverscope = NULL;
 		}
 
-		if (!clp->server_scope) {
-			clp->server_scope = res.server_scope;
+		if (!clp->cl_serverscope) {
+			clp->cl_serverscope = res.server_scope;
 			goto out;
 		}
 	}
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 5498e9d..900d733 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -17,7 +17,7 @@ struct nfs4_sequence_args;
 struct nfs4_sequence_res;
 struct nfs_server;
 struct nfs4_minor_version_ops;
-struct server_scope;
+struct nfs41_server_scope;
 struct nfs41_impl_id;
 
 /*
@@ -80,13 +80,13 @@ struct nfs_client {
 	/* The flags used for obtaining the clientid during EXCHANGE_ID */
 	u32			cl_exchange_flags;
 	struct nfs4_session	*cl_session;	/* shared session */
+	struct nfs41_server_scope *cl_serverscope;
 #endif /* CONFIG_NFS_V4 */
 
 #ifdef CONFIG_NFS_FSCACHE
 	struct fscache_cookie	*fscache;	/* client index cache cookie */
 #endif
 
-	struct server_scope	*server_scope;	/* from exchange_id */
 	struct nfs41_impl_id	*impl_id;	/* from exchange_id */
 	struct net		*net;
 };
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index bfd0d1b..eb1ce73 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1071,7 +1071,7 @@ struct server_owner {
 	char				major_id[NFS4_OPAQUE_LIMIT];
 };
 
-struct server_scope {
+struct nfs41_server_scope {
 	uint32_t			server_scope_sz;
 	char 				server_scope[NFS4_OPAQUE_LIMIT];
 };
@@ -1085,7 +1085,7 @@ struct nfs41_impl_id {
 struct nfs41_exchange_id_res {
 	struct nfs_client		*client;
 	u32				flags;
-	struct server_scope		*server_scope;
+	struct nfs41_server_scope	*server_scope;
 	struct nfs41_impl_id		*impl_id;
 };
 


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 03/20] NFS: Use proper naming conventions for nfs_client.impl_id field
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
  2012-04-23 20:53 ` [PATCH 02/20] NFS: Use proper naming conventions for NFSv4.1 server scope fields Chuck Lever
@ 2012-04-23 20:53 ` Chuck Lever
  2012-04-23 20:53 ` [PATCH 04/20] NFS: Use proper naming conventions for the nfs_client.net field Chuck Lever
                   ` (16 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:53 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Clean up:  When naming fields and data types, follow established
conventions to facilitate accurate grep/cscope searches.

Additionally, for consistency, move the impl_id field into the NFSv4-
specific part of the nfs_client, and free that memory in the logic
that shuts down NFSv4 nfs_clients.

Introduced by commit 7d2ed9ac "NFSv4: parse and display server
implementation ids," Fri Feb 17, 2012.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c           |    2 +-
 fs/nfs/nfs4proc.c         |   12 ++++++------
 fs/nfs/super.c            |    4 ++--
 include/linux/nfs_fs_sb.h |    2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 60e1525..654150a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -236,6 +236,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
 
 	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
 	kfree(clp->cl_serverscope);
+	kfree(clp->cl_implid);
 }
 
 /* idr_remove_all is not needed as all id's are removed by nfs_put_client */
@@ -304,7 +305,6 @@ static void nfs_free_client(struct nfs_client *clp)
 
 	put_net(clp->net);
 	kfree(clp->cl_hostname);
-	kfree(clp->impl_id);
 	kfree(clp);
 
 	dprintk("<-- nfs_free_client()\n");
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 669b181..3778e4f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -5055,8 +5055,8 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 
 	if (!status) {
 		/* use the most recent implementation id */
-		kfree(clp->impl_id);
-		clp->impl_id = res.impl_id;
+		kfree(clp->cl_implid);
+		clp->cl_implid = res.impl_id;
 	} else
 		kfree(res.impl_id);
 
@@ -5080,12 +5080,12 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 out_server_scope:
 	kfree(res.server_scope);
 out:
-	if (clp->impl_id)
+	if (clp->cl_implid)
 		dprintk("%s: Server Implementation ID: "
 			"domain: %s, name: %s, date: %llu,%u\n",
-			__func__, clp->impl_id->domain, clp->impl_id->name,
-			clp->impl_id->date.seconds,
-			clp->impl_id->date.nseconds);
+			__func__, clp->cl_implid->domain, clp->cl_implid->name,
+			clp->cl_implid->date.seconds,
+			clp->cl_implid->date.nseconds);
 	dprintk("<-- %s status= %d\n", __func__, status);
 	return status;
 }
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 37412f7..6decf3c 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -786,8 +786,8 @@ static void show_pnfs(struct seq_file *m, struct nfs_server *server)
 
 static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
 {
-	if (nfss->nfs_client && nfss->nfs_client->impl_id) {
-		struct nfs41_impl_id *impl_id = nfss->nfs_client->impl_id;
+	if (nfss->nfs_client && nfss->nfs_client->cl_implid) {
+		struct nfs41_impl_id *impl_id = nfss->nfs_client->cl_implid;
 		seq_printf(m, "\n\timpl_id:\tname='%s',domain='%s',"
 			   "date='%llu,%u'",
 			   impl_id->name, impl_id->domain,
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 900d733..773e021 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -81,13 +81,13 @@ struct nfs_client {
 	u32			cl_exchange_flags;
 	struct nfs4_session	*cl_session;	/* shared session */
 	struct nfs41_server_scope *cl_serverscope;
+	struct nfs41_impl_id	*cl_implid;
 #endif /* CONFIG_NFS_V4 */
 
 #ifdef CONFIG_NFS_FSCACHE
 	struct fscache_cookie	*fscache;	/* client index cache cookie */
 #endif
 
-	struct nfs41_impl_id	*impl_id;	/* from exchange_id */
 	struct net		*net;
 };
 


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 04/20] NFS: Use proper naming conventions for the nfs_client.net field
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
  2012-04-23 20:53 ` [PATCH 02/20] NFS: Use proper naming conventions for NFSv4.1 server scope fields Chuck Lever
  2012-04-23 20:53 ` [PATCH 03/20] NFS: Use proper naming conventions for nfs_client.impl_id field Chuck Lever
@ 2012-04-23 20:53 ` Chuck Lever
  2012-04-23 20:53 ` [PATCH 05/20] NFS: Clean up return code checking in nfs4_proc_exchange_id() Chuck Lever
                   ` (15 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:53 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Clean up:  When naming fields and data types, follow established
conventions to facilitate accurate grep/cscope searches.

Introduced by commit e50a7a1a "NFS: make NFS client allocated per
network namespace context," Tue Jan 10, 2012.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/blocklayout/blocklayoutdev.c |    2 +-
 fs/nfs/client.c                     |   22 +++++++++++-----------
 fs/nfs/idmap.c                      |    4 ++--
 fs/nfs/nfs4filelayoutdev.c          |    2 +-
 include/linux/nfs_fs_sb.h           |    2 +-
 5 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c
index 45aabbf..9d3b642 100644
--- a/fs/nfs/blocklayout/blocklayoutdev.c
+++ b/fs/nfs/blocklayout/blocklayoutdev.c
@@ -123,7 +123,7 @@ nfs4_blk_decode_device(struct nfs_server *server,
 	uint8_t *dataptr;
 	DECLARE_WAITQUEUE(wq, current);
 	int offset, len, i, rc;
-	struct net *net = server->nfs_client->net;
+	struct net *net = server->nfs_client->cl_net;
 	struct nfs_net *nn = net_generic(net, nfs_net_id);
 	struct bl_dev_msg *reply = &nn->bl_mount_reply;
 
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 654150a..c0232aa 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -65,7 +65,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
 static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
 {
 	int ret = 0;
-	struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 
 	if (clp->rpc_ops->version != 4 || minorversion != 0)
 		return ret;
@@ -172,7 +172,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
 	clp->cl_rpcclient = ERR_PTR(-EINVAL);
 
 	clp->cl_proto = cl_init->proto;
-	clp->net = get_net(cl_init->net);
+	clp->cl_net = get_net(cl_init->net);
 
 #ifdef CONFIG_NFS_V4
 	err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
@@ -250,7 +250,7 @@ void nfs_cleanup_cb_ident_idr(struct net *net)
 /* nfs_client_lock held */
 static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
 {
-	struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 
 	if (clp->cl_cb_ident)
 		idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident);
@@ -303,7 +303,7 @@ static void nfs_free_client(struct nfs_client *clp)
 	if (clp->cl_machine_cred != NULL)
 		put_rpccred(clp->cl_machine_cred);
 
-	put_net(clp->net);
+	put_net(clp->cl_net);
 	kfree(clp->cl_hostname);
 	kfree(clp);
 
@@ -321,7 +321,7 @@ void nfs_put_client(struct nfs_client *clp)
 		return;
 
 	dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count));
-	nn = net_generic(clp->net, nfs_net_id);
+	nn = net_generic(clp->cl_net, nfs_net_id);
 
 	if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
 		list_del(&clp->cl_share_link);
@@ -659,7 +659,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
 {
 	struct rpc_clnt		*clnt = NULL;
 	struct rpc_create_args args = {
-		.net		= clp->net,
+		.net		= clp->cl_net,
 		.protocol	= clp->cl_proto,
 		.address	= (struct sockaddr *)&clp->cl_addr,
 		.addrsize	= clp->cl_addrlen,
@@ -713,7 +713,7 @@ static int nfs_start_lockd(struct nfs_server *server)
 		.nfs_version	= clp->rpc_ops->version,
 		.noresvport	= server->flags & NFS_MOUNT_NORESVPORT ?
 					1 : 0,
-		.net		= clp->net,
+		.net		= clp->cl_net,
 	};
 
 	if (nlm_init.nfs_version > 3)
@@ -1048,7 +1048,7 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve
 static void nfs_server_insert_lists(struct nfs_server *server)
 {
 	struct nfs_client *clp = server->nfs_client;
-	struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 
 	spin_lock(&nn->nfs_client_lock);
 	list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
@@ -1065,7 +1065,7 @@ static void nfs_server_remove_lists(struct nfs_server *server)
 
 	if (clp == NULL)
 		return;
-	nn = net_generic(clp->net, nfs_net_id);
+	nn = net_generic(clp->cl_net, nfs_net_id);
 	spin_lock(&nn->nfs_client_lock);
 	list_del_rcu(&server->client_link);
 	if (list_empty(&clp->cl_superblocks))
@@ -1474,7 +1474,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
 		.rpc_ops = &nfs_v4_clientops,
 		.proto = ds_proto,
 		.minorversion = mds_clp->cl_minorversion,
-		.net = mds_clp->net,
+		.net = mds_clp->cl_net,
 	};
 	struct rpc_timeout ds_timeout = {
 		.to_initval = 15 * HZ,
@@ -1701,7 +1701,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
 				rpc_protocol(parent_server->client),
 				parent_server->client->cl_timeout,
 				parent_client->cl_mvops->minor_version,
-				parent_client->net);
+				parent_client->cl_net);
 	if (error < 0)
 		goto error;
 
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b7f348b..f349eca 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -415,7 +415,7 @@ static int __nfs_idmap_register(struct dentry *dir,
 static void nfs_idmap_unregister(struct nfs_client *clp,
 				      struct rpc_pipe *pipe)
 {
-	struct net *net = clp->net;
+	struct net *net = clp->cl_net;
 	struct super_block *pipefs_sb;
 
 	pipefs_sb = rpc_get_sb_net(net);
@@ -429,7 +429,7 @@ static int nfs_idmap_register(struct nfs_client *clp,
 				   struct idmap *idmap,
 				   struct rpc_pipe *pipe)
 {
-	struct net *net = clp->net;
+	struct net *net = clp->cl_net;
 	struct super_block *pipefs_sb;
 	int err = 0;
 
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 9b7696b..9f7c43a 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -602,7 +602,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
 
 		mp_count = be32_to_cpup(p); /* multipath count */
 		for (j = 0; j < mp_count; j++) {
-			da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->net,
+			da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->cl_net,
 					    &stream, gfp_flags);
 			if (da)
 				list_add_tail(&da->da_node, &dsaddrs);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 773e021..59410b3 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -88,7 +88,7 @@ struct nfs_client {
 	struct fscache_cookie	*fscache;	/* client index cache cookie */
 #endif
 
-	struct net		*net;
+	struct net		*cl_net;
 };
 
 /*


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 05/20] NFS: Clean up return code checking in nfs4_proc_exchange_id()
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (2 preceding siblings ...)
  2012-04-23 20:53 ` [PATCH 04/20] NFS: Use proper naming conventions for the nfs_client.net field Chuck Lever
@ 2012-04-23 20:53 ` Chuck Lever
  2012-04-23 21:07   ` Myklebust, Trond
  2012-04-23 20:54 ` [PATCH 06/20] NFS: Remove nfs_unique_id Chuck Lever
                   ` (14 subsequent siblings)
  18 siblings, 1 reply; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:53 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Clean up: prefer using the proper types in "if" expressions.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/nfs4proc.c |   16 ++++++++--------
 1 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 3778e4f..0af657d 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -5038,30 +5038,30 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 
 	res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
 					GFP_KERNEL);
-	if (unlikely(!res.server_scope)) {
+	if (unlikely(res.server_scope == NULL)) {
 		status = -ENOMEM;
 		goto out;
 	}
 
 	res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
-	if (unlikely(!res.impl_id)) {
+	if (unlikely(res.impl_id == NULL)) {
 		status = -ENOMEM;
 		goto out_server_scope;
 	}
 
 	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
-	if (!status)
+	if (status == NFS4_OK)
 		status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
 
-	if (!status) {
+	if (status == NFS4_OK) {
 		/* use the most recent implementation id */
 		kfree(clp->cl_implid);
 		clp->cl_implid = res.impl_id;
 	} else
 		kfree(res.impl_id);
 
-	if (!status) {
-		if (clp->cl_serverscope &&
+	if (status == NFS4_OK) {
+		if (clp->cl_serverscope != NULL &&
 		    !nfs41_same_server_scope(clp->cl_serverscope,
 					     res.server_scope)) {
 			dprintk("%s: server_scope mismatch detected\n",
@@ -5071,7 +5071,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 			clp->cl_serverscope = NULL;
 		}
 
-		if (!clp->cl_serverscope) {
+		if (clp->cl_serverscope == NULL) {
 			clp->cl_serverscope = res.server_scope;
 			goto out;
 		}
@@ -5080,7 +5080,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 out_server_scope:
 	kfree(res.server_scope);
 out:
-	if (clp->cl_implid)
+	if (clp->cl_implid != NULL)
 		dprintk("%s: Server Implementation ID: "
 			"domain: %s, name: %s, date: %llu,%u\n",
 			__func__, clp->cl_implid->domain, clp->cl_implid->name,


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 06/20] NFS: Remove nfs_unique_id
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (3 preceding siblings ...)
  2012-04-23 20:53 ` [PATCH 05/20] NFS: Clean up return code checking in nfs4_proc_exchange_id() Chuck Lever
@ 2012-04-23 20:54 ` Chuck Lever
  2012-04-23 20:54 ` [PATCH 07/20] NFS: Don't swap bytes in nfs4_construct_boot_verifier() Chuck Lever
                   ` (13 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:54 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Clean up:  this structure is unused.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/nfs4_fs.h |    5 -----
 1 files changed, 0 insertions(+), 5 deletions(-)

diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index c294c41..c87e846 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -52,11 +52,6 @@ struct nfs4_minor_version_ops {
 	const struct nfs4_state_maintenance_ops *state_renewal_ops;
 };
 
-struct nfs_unique_id {
-	struct rb_node rb_node;
-	__u64 id;
-};
-
 #define NFS_SEQID_CONFIRMED 1
 struct nfs_seqid_counter {
 	int owner_id;


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 07/20] NFS: Don't swap bytes in nfs4_construct_boot_verifier()
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (4 preceding siblings ...)
  2012-04-23 20:54 ` [PATCH 06/20] NFS: Remove nfs_unique_id Chuck Lever
@ 2012-04-23 20:54 ` Chuck Lever
  2012-04-23 20:54 ` [PATCH 08/20] NFS: Fix NFSv4 BAD_SEQID recovery Chuck Lever
                   ` (12 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:54 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

The SETCLIENTID boot verifier is opaque to NFSv4 servers, thus there
is no requirement for byte swapping before the client puts the
verifier on the wire.

This treatment is similar to other timestamp-based verifiers.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/nfs4proc.c |    4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 0af657d..6ae0c14 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3880,8 +3880,8 @@ static void nfs4_construct_boot_verifier(struct nfs_client *clp,
 {
 	__be32 verf[2];
 
-	verf[0] = htonl((u32)clp->cl_boot_time.tv_sec);
-	verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec);
+	verf[0] = (__be32)clp->cl_boot_time.tv_sec;
+	verf[1] = (__be32)clp->cl_boot_time.tv_nsec;
 	memcpy(bootverf->data, verf, sizeof(bootverf->data));
 }
 


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 08/20] NFS: Fix NFSv4 BAD_SEQID recovery
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (5 preceding siblings ...)
  2012-04-23 20:54 ` [PATCH 07/20] NFS: Don't swap bytes in nfs4_construct_boot_verifier() Chuck Lever
@ 2012-04-23 20:54 ` Chuck Lever
  2012-04-23 20:54 ` [PATCH 09/20] NFS: Force server to drop NFSv4 state Chuck Lever
                   ` (11 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:54 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

The proper response to receiving NFS4ERR_BAD_SEQID from a server is to
try the failing operation again with a different (new) open owner that
has a reset sequence ID.

Our client drops a state owner when it receives NFS4ERR_BAD_SEQID,
as it should.  But chances are good we will pick up exactly the same
open owner ID from the nfs_server's ida struct and use it for the
retry, which is not really useful.

To fix this, add a uniquifier to the open owners generated by our NFS
client.  The per-nfs_server ida struct will continue to guarantee we
don't hand out the same open owner more than once concurrently, while
the new uniquifier guarantees that the open owner string is always
different after a NFS4ERR_BAD_SEQID error.

The current value of the uniquifier is planted in a state owner upon
its creation.  If our client receives NFS4ERR_BAD_SEQID, it bumps the
uniquifier and drops the state owner.  The next state owner to be
created will have a fresh uniquifier, and thus should appear different
to the server.  State owners that continue to be cached on the client
remain unchanged.

A certain amount of code clean up was also done.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/nfs4_fs.h          |    1 +
 fs/nfs/nfs4proc.c         |   21 +++++++++++----------
 fs/nfs/nfs4state.c        |   16 +++++++++++-----
 fs/nfs/nfs4xdr.c          |   15 +++++++--------
 include/linux/nfs_fs_sb.h |    1 +
 include/linux/nfs_xdr.h   |    6 ++++--
 6 files changed, 35 insertions(+), 25 deletions(-)

diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index c87e846..d8c2d39 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -55,6 +55,7 @@ struct nfs4_minor_version_ops {
 #define NFS_SEQID_CONFIRMED 1
 struct nfs_seqid_counter {
 	int owner_id;
+	int instance;
 	int flags;
 	u32 counter;
 	spinlock_t lock;		/* Protects the list */
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6ae0c14..84a26d9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -838,7 +838,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
 	p->o_arg.open_flags = flags;
 	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
 	p->o_arg.clientid = server->nfs_client->cl_clientid;
-	p->o_arg.id = sp->so_seqid.owner_id;
+	p->o_arg.owner_id = sp->so_seqid.owner_id;
+	p->o_arg.instance = sp->so_seqid.instance;
 	p->o_arg.name = &dentry->d_name;
 	p->o_arg.server = server;
 	p->o_arg.bitmask = server->attr_bitmask;
@@ -1467,7 +1468,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
 		rcu_read_unlock();
 	}
 	/* Update sequence id. */
-	data->o_arg.id = sp->so_seqid.owner_id;
+	data->o_arg.owner_id = sp->so_seqid.owner_id;
+	data->o_arg.instance = sp->so_seqid.instance;
 	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
 	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
@@ -1871,13 +1873,9 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry,
 		 * If we receive a BAD_SEQID error in the particular case of
 		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
 		 * have unhashed the old state_owner for us, and that we can
-		 * therefore safely retry using a new one. We should still warn
-		 * the user though...
+		 * therefore safely retry using a new one.
 		 */
 		if (status == -NFS4ERR_BAD_SEQID) {
-			pr_warn_ratelimited("NFS: v4 server %s "
-					" returned a bad sequence-id error!\n",
-					NFS_SERVER(dir)->nfs_client->cl_hostname);
 			exception.retry = 1;
 			continue;
 		}
@@ -4137,7 +4135,8 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
 	if (status != 0)
 		goto out;
 	lsp = request->fl_u.nfs4_fl.owner;
-	arg.lock_owner.id = lsp->ls_seqid.owner_id;
+	arg.lock_owner.owner_id = lsp->ls_seqid.owner_id;
+	arg.lock_owner.instance = lsp->ls_seqid.instance;
 	arg.lock_owner.s_dev = server->s_dev;
 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
 	switch (status) {
@@ -4382,7 +4381,8 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
 		goto out_free_seqid;
 	p->arg.lock_stateid = &lsp->ls_stateid;
 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
-	p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
+	p->arg.lock_owner.owner_id = lsp->ls_seqid.owner_id;
+	p->arg.lock_owner.instance = lsp->ls_seqid.instance;
 	p->arg.lock_owner.s_dev = server->s_dev;
 	p->res.lock_seqid = p->arg.lock_seqid;
 	p->lsp = lsp;
@@ -4832,7 +4832,8 @@ int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
 	data->lsp = lsp;
 	data->server = server;
 	data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
-	data->args.lock_owner.id = lsp->ls_seqid.owner_id;
+	data->args.lock_owner.owner_id = lsp->ls_seqid.owner_id;
+	data->args.lock_owner.instance = lsp->ls_seqid.instance;
 	data->args.lock_owner.s_dev = server->s_dev;
 	msg.rpc_argp = &data->args;
 	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0f43414..cbef366 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -503,6 +503,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
 		if (ida_pre_get(&server->openowner_id, gfp_flags) == 0)
 			break;
 		spin_lock(&clp->cl_lock);
+		new->so_seqid.instance = server->bad_seqid_count;
 		sp = nfs4_insert_state_owner_locked(new);
 		spin_unlock(&clp->cl_lock);
 	} while (sp == ERR_PTR(-EAGAIN));
@@ -763,6 +764,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 {
 	struct nfs4_lock_state *lsp;
 	struct nfs_server *server = state->owner->so_server;
+	struct nfs_client *clp = server->nfs_client;
 
 	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
 	if (lsp == NULL)
@@ -784,6 +786,9 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 	lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
 	if (lsp->ls_seqid.owner_id < 0)
 		goto out_free;
+	spin_lock(&clp->cl_lock);
+	lsp->ls_seqid.instance = server->bad_seqid_count;
+	spin_unlock(&clp->cl_lock);
 	INIT_LIST_HEAD(&lsp->ls_locks);
 	return lsp;
 out_free:
@@ -989,10 +994,6 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
 		case -NFS4ERR_BAD_SEQID:
 			if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
 				return;
-			pr_warn_ratelimited("NFS: v4 server returned a bad"
-					" sequence-id error on an"
-					" unconfirmed sequence %p!\n",
-					seqid->sequence);
 		case -NFS4ERR_STALE_CLIENTID:
 		case -NFS4ERR_STALE_STATEID:
 		case -NFS4ERR_BAD_STATEID:
@@ -1015,8 +1016,13 @@ void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
 					struct nfs4_state_owner, so_seqid);
 	struct nfs_server *server = sp->so_server;
 
-	if (status == -NFS4ERR_BAD_SEQID)
+	if (status == -NFS4ERR_BAD_SEQID) {
+		struct nfs_client *clp = server->nfs_client;
+		spin_lock(&clp->cl_lock);
+		++server->bad_seqid_count;
+		spin_unlock(&clp->cl_lock);
 		nfs4_drop_state_owner(sp);
+	}
 	if (!nfs4_has_session(server->nfs_client))
 		nfs_increment_seqid(status, seqid);
 }
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c74fdb1..9fce30f 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1242,8 +1242,9 @@ static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lo
 	p = xdr_encode_hyper(p, lowner->clientid);
 	*p++ = cpu_to_be32(20);
 	p = xdr_encode_opaque_fixed(p, "lock id:", 8);
-	*p++ = cpu_to_be32(lowner->s_dev);
-	xdr_encode_hyper(p, lowner->id);
+	*p++ = (__be32)lowner->s_dev;
+	*p++ = (__be32)lowner->instance;
+	*p++ = (__be32)lowner->owner_id;
 }
 
 /*
@@ -1334,18 +1335,16 @@ static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode)
 static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg)
 {
 	__be32 *p;
- /*
- * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
- * owner 4 = 32
- */
+
 	encode_nfs4_seqid(xdr, arg->seqid);
 	encode_share_access(xdr, arg->fmode);
 	p = reserve_space(xdr, 32);
 	p = xdr_encode_hyper(p, arg->clientid);
 	*p++ = cpu_to_be32(20);
 	p = xdr_encode_opaque_fixed(p, "open id:", 8);
-	*p++ = cpu_to_be32(arg->server->s_dev);
-	xdr_encode_hyper(p, arg->id);
+	*p++ = (__be32)arg->server->s_dev;
+	*p++ = (__be32)arg->instance;
+	*p++ = (__be32)arg->owner_id;
 }
 
 static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 59410b3..3c38291 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -157,6 +157,7 @@ struct nfs_server {
 
 	/* the following fields are protected by nfs_client->cl_lock */
 	struct rb_root		state_owners;
+	u32			bad_seqid_count;
 #endif
 	struct ida		openowner_id;
 	struct ida		lockowner_id;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index eb1ce73..ece705d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -321,7 +321,8 @@ struct nfs_openargs {
 	int			open_flags;
 	fmode_t			fmode;
 	__u64                   clientid;
-	__u64                   id;
+	int			owner_id;
+	int			instance;
 	union {
 		struct {
 			struct iattr *  attrs;    /* UNCHECKED, GUARDED */
@@ -395,7 +396,8 @@ struct nfs_closeres {
  *   */
 struct nfs_lowner {
 	__u64			clientid;
-	__u64			id;
+	int			owner_id;
+	int			instance;
 	dev_t			s_dev;
 };
 


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 09/20] NFS: Force server to drop NFSv4 state
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (6 preceding siblings ...)
  2012-04-23 20:54 ` [PATCH 08/20] NFS: Fix NFSv4 BAD_SEQID recovery Chuck Lever
@ 2012-04-23 20:54 ` Chuck Lever
  2012-04-23 21:13   ` Myklebust, Trond
  2012-04-23 20:54 ` [PATCH 10/20] NFS: Always use the same SETCLIENTID boot verifier Chuck Lever
                   ` (10 subsequent siblings)
  18 siblings, 1 reply; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:54 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

A SETCLIENTID boot verifier is nothing more than a boot timestamp.
An NFSv4 server is obligated to wipe all NFSv4 state for an NFS client
when the client presents an updated SETCLIENTID boot verifier.  This
is how servers detect client reboots.

nfs4_reset_all_state() forces a boot verifier refresh to cause a
server to wipe state as part of recovering from a server reporting
that it has revoked some or all of a client's NFSv4 state.  This wipes
the slate for full state recovery.

Soon we want to get rid of the per-nfs_client cl_boot_time field,
however.  Without cl_boot_time, the NFS client will need to find a
different way to force the server to purge the client's NFSv4 state.

Because these verifiers are opaque (ie, the server doesn't know or
care that they are timestamps), we can do this by using the same
trick we use now, but then afterwards establish a fresh client ID
using the old boot verifier again.

Hopefully there are no extra paranoid server implementations that keep
track of the client's boot verifiers and prevent clients from reusing
a previous one.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/nfs4_fs.h   |    1 +
 fs/nfs/nfs4proc.c  |    9 +++++++--
 fs/nfs/nfs4state.c |    7 ++++++-
 3 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index d8c2d39..2953f2c 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -24,6 +24,7 @@ enum nfs4_client_state {
 	NFS4CLNT_RECALL_SLOT,
 	NFS4CLNT_LEASE_CONFIRM,
 	NFS4CLNT_SERVER_SCOPE_MISMATCH,
+	NFS4CLNT_PURGE_STATE,
 };
 
 enum nfs4_session_state {
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 84a26d9..b19cf81 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3878,8 +3878,13 @@ static void nfs4_construct_boot_verifier(struct nfs_client *clp,
 {
 	__be32 verf[2];
 
-	verf[0] = (__be32)clp->cl_boot_time.tv_sec;
-	verf[1] = (__be32)clp->cl_boot_time.tv_nsec;
+	if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
+		verf[0] = (__be32)CURRENT_TIME.tv_sec;
+		verf[1] = (__be32)CURRENT_TIME.tv_nsec;
+	} else {
+		verf[0] = (__be32)clp->cl_boot_time.tv_sec;
+		verf[1] = (__be32)clp->cl_boot_time.tv_nsec;
+	}
 	memcpy(bootverf->data, verf, sizeof(bootverf->data));
 }
 
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index cbef366..7f56502 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1612,7 +1612,7 @@ void nfs41_handle_recall_slot(struct nfs_client *clp)
 static void nfs4_reset_all_state(struct nfs_client *clp)
 {
 	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
-		clp->cl_boot_time = CURRENT_TIME;
+		set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
 		nfs4_state_start_reclaim_nograce(clp);
 		nfs4_schedule_state_manager(clp);
 	}
@@ -1759,6 +1759,11 @@ static void nfs4_state_manager(struct nfs_client *clp)
 
 	/* Ensure exclusive access to NFSv4 state */
 	do {
+		if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
+			nfs4_reclaim_lease(clp);
+			clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+		}
+
 		if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
 			/* We're going to have to re-establish a clientid */
 			status = nfs4_reclaim_lease(clp);


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 10/20] NFS: Always use the same SETCLIENTID boot verifier
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (7 preceding siblings ...)
  2012-04-23 20:54 ` [PATCH 09/20] NFS: Force server to drop NFSv4 state Chuck Lever
@ 2012-04-23 20:54 ` Chuck Lever
  2012-04-23 20:54 ` [PATCH 11/20] NFS: Refactor nfs_get_client(): add nfs_found_client() Chuck Lever
                   ` (9 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:54 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Currently our NFS client assigns a unique SETCLIENTID boot verifier
for each server IP address.  It's set to CURRENT_TIME when a struct
nfs_client for that server is created.

During the SETCLIENTID operation, our client also presents an
nfs_client_id4 string to servers as an identifier on which the server
can hang all of this client's NFSv4 state.  Our client's
nfs_client_id4 string is also unique for each server IP address.

An NFSv4 server is obligated to wipe all NFSv4 state associated with
an nfs_client_id4 string when the client presents this string with a
changed SETCLIENTID boot verifier.

When our client unmounts the last of a server's shares, it destroys
that server's struct nfs_client.  The next time the client mounts that
NFS server, it creates a fresh struct nfs_client with a fresh boot
verifier.  On seeing the fresh verifier, the server wipes any previous
NFSv4 state associated with that nfs_client_id4.

However, NFSv4.1 clients are supposed to present the same
nfs_client_id4 string to all servers.  And, to support Transparent
State Migration, the same nfs_client_id4 string should be presented
to all servers so they recognize that migrated state for this client
belongs with state a server may already have for this client.

If the nfs_client_id4 string is the same but the boot verifier changes
for each server IP address, SETCLIENTIDs from such a client could
result in the server wiping the client's previously obtained lease.

Thus, if our NFS client is going to use a fixed nfs_client_id4 string,
our NFS client should also use a SETCLIENTID boot verifier that does
not change depending on which NFS server is being contacted.

Replace our current per-nfs_client boot verifier with a per-nfs_net
boot verifier.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c           |    2 +-
 fs/nfs/netns.h            |    5 +++++
 fs/nfs/nfs4proc.c         |   14 ++++++++------
 fs/nfs/nfs4xdr.c          |    5 ++++-
 include/linux/nfs_fs_sb.h |    3 ---
 5 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index c0232aa..3ba6c62 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -182,7 +182,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
 	spin_lock_init(&clp->cl_lock);
 	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
-	clp->cl_boot_time = CURRENT_TIME;
 	clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
 	clp->cl_minorversion = cl_init->minorversion;
 	clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
@@ -1804,6 +1803,7 @@ void nfs_clients_init(struct net *net)
 	idr_init(&nn->cb_ident_idr);
 #endif
 	spin_lock_init(&nn->nfs_client_lock);
+	nn->boot_time = CURRENT_TIME;
 }
 
 #ifdef CONFIG_PROC_FS
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index aa14ec3..8a6394e 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -1,3 +1,7 @@
+/*
+ * NFS-private data for each "struct net".  Accessed with net_generic().
+ */
+
 #ifndef __NFS_NETNS_H__
 #define __NFS_NETNS_H__
 
@@ -20,6 +24,7 @@ struct nfs_net {
 	struct idr cb_ident_idr; /* Protected by nfs_client_lock */
 #endif
 	spinlock_t nfs_client_lock;
+	struct timespec boot_time;
 };
 
 extern int nfs_net_id;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b19cf81..8bdc6fd 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -64,6 +64,7 @@
 #include "iostat.h"
 #include "callback.h"
 #include "pnfs.h"
+#include "netns.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PROC
 
@@ -3873,8 +3874,8 @@ wait_on_recovery:
 	return -EAGAIN;
 }
 
-static void nfs4_construct_boot_verifier(struct nfs_client *clp,
-					 nfs4_verifier *bootverf)
+static void nfs4_init_boot_verifier(const struct nfs_client *clp,
+				    nfs4_verifier *bootverf)
 {
 	__be32 verf[2];
 
@@ -3882,8 +3883,9 @@ static void nfs4_construct_boot_verifier(struct nfs_client *clp,
 		verf[0] = (__be32)CURRENT_TIME.tv_sec;
 		verf[1] = (__be32)CURRENT_TIME.tv_nsec;
 	} else {
-		verf[0] = (__be32)clp->cl_boot_time.tv_sec;
-		verf[1] = (__be32)clp->cl_boot_time.tv_nsec;
+		struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
+		verf[0] = (__be32)nn->boot_time.tv_sec;
+		verf[1] = (__be32)nn->boot_time.tv_nsec;
 	}
 	memcpy(bootverf->data, verf, sizeof(bootverf->data));
 }
@@ -3907,7 +3909,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
 	int loop = 0;
 	int status;
 
-	nfs4_construct_boot_verifier(clp, &sc_verifier);
+	nfs4_init_boot_verifier(clp, &sc_verifier);
 
 	for(;;) {
 		rcu_read_lock();
@@ -5033,7 +5035,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 	dprintk("--> %s\n", __func__);
 	BUG_ON(clp == NULL);
 
-	nfs4_construct_boot_verifier(clp, &verifier);
+	nfs4_init_boot_verifier(clp, &verifier);
 
 	args.id_len = scnprintf(args.id, sizeof(args.id),
 				"%s/%s.%s/%u",
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 9fce30f..312f619 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -53,9 +53,11 @@
 #include <linux/nfs4.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfs_idmap.h>
+
 #include "nfs4_fs.h"
 #include "internal.h"
 #include "pnfs.h"
+#include "netns.h"
 
 #define NFSDBG_FACILITY		NFSDBG_XDR
 
@@ -1724,6 +1726,7 @@ static void encode_create_session(struct xdr_stream *xdr,
 	char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
 	uint32_t len;
 	struct nfs_client *clp = args->client;
+	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
 	u32 max_resp_sz_cached;
 
 	/*
@@ -1765,7 +1768,7 @@ static void encode_create_session(struct xdr_stream *xdr,
 	*p++ = cpu_to_be32(RPC_AUTH_UNIX);			/* auth_sys */
 
 	/* authsys_parms rfc1831 */
-	*p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec);	/* stamp */
+	*p++ = (__be32)nn->boot_time.tv_nsec;		/* stamp */
 	p = xdr_encode_opaque(p, machine_name, len);
 	*p++ = cpu_to_be32(0);				/* UID */
 	*p++ = cpu_to_be32(0);				/* GID */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 3c38291..b246582 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -61,9 +61,6 @@ struct nfs_client {
 
 	struct rpc_wait_queue	cl_rpcwaitq;
 
-	/* used for the setclientid verifier */
-	struct timespec		cl_boot_time;
-
 	/* idmapper */
 	struct idmap *		cl_idmap;
 


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 11/20] NFS: Refactor nfs_get_client(): add nfs_found_client()
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (8 preceding siblings ...)
  2012-04-23 20:54 ` [PATCH 10/20] NFS: Always use the same SETCLIENTID boot verifier Chuck Lever
@ 2012-04-23 20:54 ` Chuck Lever
  2012-04-23 20:54 ` [PATCH 12/20] NFS: Refactor nfs_get_client(): initialize nfs_client Chuck Lever
                   ` (8 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:54 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Clean up: Rationalize the locking in nfs_get_client() for the case
where the target server already appears in the nfs_client_list.

Code that takes and releases nfs_client_lock remains in
nfs_get_client().  The rest of the logic is moved to a separate
function.

No behavior change is expected.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c |   71 ++++++++++++++++++++++++++++++++-----------------------
 1 files changed, 41 insertions(+), 30 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 3ba6c62..09b3f55 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -504,6 +504,39 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
 }
 
 /*
+ * Found an existing client.  Make sure it's ready before returning.
+ */
+static struct nfs_client *
+nfs_found_client(const struct nfs_client_initdata *cl_init,
+		 struct nfs_client *clp)
+{
+	int error;
+
+	error = wait_event_killable(nfs_client_active_wq,
+				clp->cl_cons_state < NFS_CS_INITING);
+	if (error < 0) {
+		dprintk("<-- nfs_get_client error waiting for nfs_client %p "
+			"for %s (%d)\n", clp, cl_init->hostname ?: "", error);
+		nfs_put_client(clp);
+		return ERR_PTR(-ERESTARTSYS);
+	}
+
+	if (clp->cl_cons_state < NFS_CS_READY) {
+		error = clp->cl_cons_state;
+		dprintk("<-- nfs_get_client found nfs_client %p for %s, not "
+			"ready (%d)\n", clp, cl_init->hostname ?: "", error);
+		nfs_put_client(clp);
+		return ERR_PTR(error);
+	}
+
+	BUG_ON(clp->cl_cons_state != NFS_CS_READY);
+
+	dprintk("<-- nfs_get_client found nfs_client %p for %s\n",
+		clp, cl_init->hostname ?: "");
+	return clp;
+}
+
+/*
  * Look up a client by IP address and protocol version
  * - creates a new record if one doesn't yet exist
  */
@@ -526,8 +559,12 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
 		spin_lock(&nn->nfs_client_lock);
 
 		clp = nfs_match_client(cl_init);
-		if (clp)
-			goto found_client;
+		if (clp) {
+			spin_unlock(&nn->nfs_client_lock);
+			if (new)
+				nfs_free_client(new);
+			return nfs_found_client(cl_init, clp);
+		}
 		if (new)
 			goto install_client;
 
@@ -536,7 +573,8 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
 		new = nfs_alloc_client(cl_init);
 	} while (!IS_ERR(new));
 
-	dprintk("--> nfs_get_client() = %ld [failed]\n", PTR_ERR(new));
+	dprintk("<-- nfs_get_client() Failed to find %s (%ld)\n",
+		cl_init->hostname ?: "", PTR_ERR(new));
 	return new;
 
 	/* install a new client and return with it unready */
@@ -553,33 +591,6 @@ install_client:
 	}
 	dprintk("--> nfs_get_client() = %p [new]\n", clp);
 	return clp;
-
-	/* found an existing client
-	 * - make sure it's ready before returning
-	 */
-found_client:
-	spin_unlock(&nn->nfs_client_lock);
-
-	if (new)
-		nfs_free_client(new);
-
-	error = wait_event_killable(nfs_client_active_wq,
-				clp->cl_cons_state < NFS_CS_INITING);
-	if (error < 0) {
-		nfs_put_client(clp);
-		return ERR_PTR(-ERESTARTSYS);
-	}
-
-	if (clp->cl_cons_state < NFS_CS_READY) {
-		error = clp->cl_cons_state;
-		nfs_put_client(clp);
-		return ERR_PTR(error);
-	}
-
-	BUG_ON(clp->cl_cons_state != NFS_CS_READY);
-
-	dprintk("--> nfs_get_client() = %p [share]\n", clp);
-	return clp;
 }
 
 /*


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 12/20] NFS: Refactor nfs_get_client(): initialize nfs_client
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (9 preceding siblings ...)
  2012-04-23 20:54 ` [PATCH 11/20] NFS: Refactor nfs_get_client(): add nfs_found_client() Chuck Lever
@ 2012-04-23 20:54 ` Chuck Lever
  2012-04-23 20:55 ` [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE Chuck Lever
                   ` (7 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:54 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Clean up: Continue to rationalize the locking in nfs_get_client() by
moving the logic that handles the case where a matching server IP
address is not found.

When we support server trunking detection, client initialization may
return a different nfs_client struct than was passed to it.  Change
the synopsis of the init_client methods to return an nfs_client.

The client initialization logic in nfs_get_client() is not much more
than a wrapper around ->init_client.  It's simpler to keep the little
bits of error handling in the version-specific init_client methods.

No behavior change is expected.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c         |   52 ++++++++++++++++++++---------------------------
 fs/nfs/internal.h       |   19 +++++++++--------
 include/linux/nfs_xdr.h |    3 ++-
 3 files changed, 34 insertions(+), 40 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 09b3f55..d62f7e4 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -548,7 +548,6 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
 	       int noresvport)
 {
 	struct nfs_client *clp, *new = NULL;
-	int error;
 	struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
 
 	dprintk("--> nfs_get_client(%s,v%u)\n",
@@ -565,8 +564,13 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
 				nfs_free_client(new);
 			return nfs_found_client(cl_init, clp);
 		}
-		if (new)
-			goto install_client;
+		if (new) {
+			list_add(&new->cl_share_link, &nn->nfs_client_list);
+			spin_unlock(&nn->nfs_client_lock);
+			return cl_init->rpc_ops->init_client(new,
+						timeparms, ip_addr,
+						authflavour, noresvport);
+		}
 
 		spin_unlock(&nn->nfs_client_lock);
 
@@ -576,21 +580,6 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
 	dprintk("<-- nfs_get_client() Failed to find %s (%ld)\n",
 		cl_init->hostname ?: "", PTR_ERR(new));
 	return new;
-
-	/* install a new client and return with it unready */
-install_client:
-	clp = new;
-	list_add(&clp->cl_share_link, &nn->nfs_client_list);
-	spin_unlock(&nn->nfs_client_lock);
-
-	error = cl_init->rpc_ops->init_client(clp, timeparms, ip_addr,
-					      authflavour, noresvport);
-	if (error < 0) {
-		nfs_put_client(clp);
-		return ERR_PTR(error);
-	}
-	dprintk("--> nfs_get_client() = %p [new]\n", clp);
-	return clp;
 }
 
 /*
@@ -818,7 +807,8 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
 /*
  * Initialise an NFS2 or NFS3 client
  */
-int nfs_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms,
+struct nfs_client *nfs_init_client(struct nfs_client *clp,
+		    const struct rpc_timeout *timeparms,
 		    const char *ip_addr, rpc_authflavor_t authflavour,
 		    int noresvport)
 {
@@ -827,7 +817,7 @@ int nfs_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms,
 	if (clp->cl_cons_state == NFS_CS_READY) {
 		/* the client is already initialised */
 		dprintk("<-- nfs_init_client() = 0 [already %p]\n", clp);
-		return 0;
+		return clp;
 	}
 
 	/*
@@ -839,12 +829,13 @@ int nfs_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms,
 	if (error < 0)
 		goto error;
 	nfs_mark_client_ready(clp, NFS_CS_READY);
-	return 0;
+	return clp;
 
 error:
 	nfs_mark_client_ready(clp, error);
+	nfs_put_client(clp);
 	dprintk("<-- nfs_init_client() = xerror %d\n", error);
-	return error;
+	return ERR_PTR(error);
 }
 
 /*
@@ -1353,11 +1344,11 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
 /*
  * Initialise an NFS4 client record
  */
-int nfs4_init_client(struct nfs_client *clp,
-		     const struct rpc_timeout *timeparms,
-		     const char *ip_addr,
-		     rpc_authflavor_t authflavour,
-		     int noresvport)
+struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+				    const struct rpc_timeout *timeparms,
+				    const char *ip_addr,
+				    rpc_authflavor_t authflavour,
+				    int noresvport)
 {
 	char buf[INET6_ADDRSTRLEN + 1];
 	int error;
@@ -1365,7 +1356,7 @@ int nfs4_init_client(struct nfs_client *clp,
 	if (clp->cl_cons_state == NFS_CS_READY) {
 		/* the client is initialised already */
 		dprintk("<-- nfs4_init_client() = 0 [already %p]\n", clp);
-		return 0;
+		return clp;
 	}
 
 	/* Check NFS protocol revision and initialize RPC op vector */
@@ -1405,12 +1396,13 @@ int nfs4_init_client(struct nfs_client *clp,
 
 	if (!nfs4_has_session(clp))
 		nfs_mark_client_ready(clp, NFS_CS_READY);
-	return 0;
+	return clp;
 
 error:
 	nfs_mark_client_ready(clp, error);
+	nfs_put_client(clp);
 	dprintk("<-- nfs4_init_client() = xerror %d\n", error);
-	return error;
+	return ERR_PTR(error);
 }
 
 /*
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index faf15eb..f1021c8 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -154,6 +154,16 @@ extern struct nfs_client *nfs4_find_client_ident(struct net *, int);
 extern struct nfs_client *
 nfs4_find_client_sessionid(struct net *, const struct sockaddr *,
 				struct nfs4_sessionid *);
+extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
+				const struct rpc_timeout *timeparms,
+				const char *ip_addr,
+				rpc_authflavor_t authflavour,
+				int noresvport);
+extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+				const struct rpc_timeout *timeparms,
+				const char *ip_addr,
+				rpc_authflavor_t authflavour,
+				int noresvport);
 extern struct nfs_server *nfs_create_server(
 					const struct nfs_parsed_mount_data *,
 					struct nfs_fh *);
@@ -241,10 +251,6 @@ extern int nfs4_init_ds_session(struct nfs_client *clp);
 
 /* proc.c */
 void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
-extern int nfs_init_client(struct nfs_client *clp,
-			   const struct rpc_timeout *timeparms,
-			   const char *ip_addr, rpc_authflavor_t authflavour,
-			   int noresvport);
 
 /* dir.c */
 extern int nfs_access_cache_shrinker(struct shrinker *shrink,
@@ -345,11 +351,6 @@ extern int nfs_migrate_page(struct address_space *,
 /* nfs4proc.c */
 extern void __nfs4_read_done_cb(struct nfs_read_data *);
 extern void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data);
-extern int nfs4_init_client(struct nfs_client *clp,
-			    const struct rpc_timeout *timeparms,
-			    const char *ip_addr,
-			    rpc_authflavor_t authflavour,
-			    int noresvport);
 extern void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data);
 extern int _nfs4_call_sync(struct rpc_clnt *clnt,
 			   struct nfs_server *server,
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index ece705d..7437931 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1284,7 +1284,8 @@ struct nfs_rpc_ops {
 				struct nfs_open_context *ctx,
 				int open_flags,
 				struct iattr *iattr);
-	int	(*init_client) (struct nfs_client *, const struct rpc_timeout *,
+	struct nfs_client *
+		(*init_client) (struct nfs_client *, const struct rpc_timeout *,
 				const char *, rpc_authflavor_t, int);
 	int	(*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
 };


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (10 preceding siblings ...)
  2012-04-23 20:54 ` [PATCH 12/20] NFS: Refactor nfs_get_client(): initialize nfs_client Chuck Lever
@ 2012-04-23 20:55 ` Chuck Lever
  2012-04-26 16:24   ` Chuck Lever
  2012-04-23 20:55 ` [PATCH 14/20] NFS: Add nfs_client behavior flags Chuck Lever
                   ` (6 subsequent siblings)
  18 siblings, 1 reply; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:55 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

For NFSv4 minor version 0, currently the cl_id_uniquifier allows the
Linux client to generate a unique nfs_client_id4 string whenever a
server replies with NFS4ERR_CLID_INUSE.

NFS4ERR_CLID_INUSE actually means that the client has presented this
nfs_client_id4 string with a different authentication flavor in the
past.  Retrying with a different nfs_client_id4 string means the
client orphans NFSv4 state on the server.  This state will take at
least a whole lease period to be purged.

Change recovery to try the identification operation again with a
different auth flavor until it works.  The retry loop is factored
out of nfs4_proc_setclientid() and into the state manager, so that
both mv0 and mv1 client ID establishment is covered by the same
CLID_INUSE recovery logic.

XXX: On further review, I'm not sure how it would be possible to
send an nfs_client_id4 with the wrong authentication flavor, since
the au_name is part of the string itself...

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/nfs4proc.c         |   75 ++++++++++++++++++++++++++++++---------------
 fs/nfs/nfs4state.c        |   37 ++++++++++++++++++----
 include/linux/nfs_fs_sb.h |    3 +-
 3 files changed, 81 insertions(+), 34 deletions(-)

diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8bdc6fd..7ec1b68 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3890,6 +3890,37 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
 	memcpy(bootverf->data, verf, sizeof(bootverf->data));
 }
 
+static unsigned int
+nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
+				   char *buf, size_t len)
+{
+	unsigned int result;
+
+	rcu_read_lock();
+	result = scnprintf(buf, len, "%s/%s %s %s non-uniform",
+				clp->cl_ipaddr,
+				rpc_peeraddr2str(clp->cl_rpcclient,
+							RPC_DISPLAY_ADDR),
+				rpc_peeraddr2str(clp->cl_rpcclient,
+							RPC_DISPLAY_PROTO),
+				clp->cl_rpcclient->cl_auth->au_ops->au_name);
+	rcu_read_unlock();
+	return result;
+}
+
+/**
+ * nfs4_proc_setclientid - Negotiate client ID
+ * @clp: state data structure
+ * @program: RPC program for NFSv4 callback service
+ * @port: IP port number for NFS4 callback service
+ * @cred: RPC credential to use for this call
+ * @res: where to place the result
+ *
+ * Returns zero or a negative NFS4ERR status code.
+ *
+ * A status of -NFS4ERR_CLID_INUSE means the caller should try
+ * again with a different authentication flavor.
+ */
 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
 		unsigned short port, struct rpc_cred *cred,
 		struct nfs4_setclientid_res *res)
@@ -3906,41 +3937,30 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
 		.rpc_resp = res,
 		.rpc_cred = cred,
 	};
-	int loop = 0;
 	int status;
 
+	/* Client ID */
 	nfs4_init_boot_verifier(clp, &sc_verifier);
+	setclientid.sc_name_len = nfs4_init_nonuniform_client_string(clp,
+						setclientid.sc_name,
+						sizeof(setclientid.sc_name));
 
-	for(;;) {
-		rcu_read_lock();
-		setclientid.sc_name_len = scnprintf(setclientid.sc_name,
-				sizeof(setclientid.sc_name), "%s/%s %s %s %u",
-				clp->cl_ipaddr,
-				rpc_peeraddr2str(clp->cl_rpcclient,
-							RPC_DISPLAY_ADDR),
-				rpc_peeraddr2str(clp->cl_rpcclient,
-							RPC_DISPLAY_PROTO),
-				clp->cl_rpcclient->cl_auth->au_ops->au_name,
-				clp->cl_id_uniquifier);
-		setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
+	/* Callback info */
+	rcu_read_lock();
+	setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
 				sizeof(setclientid.sc_netid),
 				rpc_peeraddr2str(clp->cl_rpcclient,
 							RPC_DISPLAY_NETID));
-		setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
+	rcu_read_unlock();
+	setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
 				sizeof(setclientid.sc_uaddr), "%s.%u.%u",
 				clp->cl_ipaddr, port >> 8, port & 255);
-		rcu_read_unlock();
 
-		status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
-		if (status != -NFS4ERR_CLID_INUSE)
-			break;
-		if (loop != 0) {
-			++clp->cl_id_uniquifier;
-			break;
-		}
-		++loop;
-		ssleep(clp->cl_lease_time / HZ + 1);
-	}
+	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+
+	dprintk("%s: nfs_client_id4 '%.*s' (status %d)\n",
+		__func__, setclientid.sc_name_len, setclientid.sc_name,
+		status);
 	return status;
 }
 
@@ -5008,6 +5028,11 @@ nfs41_same_server_scope(struct nfs41_server_scope *a,
 /*
  * nfs4_proc_exchange_id()
  *
+ * Returns zero or a negative NFS4ERR status code.
+ *
+ * A status of -NFS4ERR_CLID_INUSE means the caller should try
+ * again with a different authentication flavor.
+ *
  * Since the clientid has expired, all compounds using sessions
  * associated with the stale clientid will be returning
  * NFS4ERR_BADSESSION in the sequence operation, and will therefore
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 7f56502..6a1a305 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1576,19 +1576,42 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
 	struct rpc_cred *cred;
 	const struct nfs4_state_recovery_ops *ops =
 		clp->cl_mvops->reboot_recovery_ops;
-	int status = -ENOENT;
+	rpc_authflavor_t flavors[NFS_MAX_SECFLAVORS];
+	int i, len, status;
 
+	i = 0;
+	len = gss_mech_list_pseudoflavors(flavors);
+
+again:
+	status = -ENOENT;
 	cred = ops->get_clid_cred(clp);
 	if (cred != NULL) {
 		status = ops->establish_clid(clp, cred);
 		put_rpccred(cred);
-		/* Handle case where the user hasn't set up machine creds */
-		if (status == -EACCES && cred == clp->cl_machine_cred) {
-			nfs4_clear_machine_cred(clp);
-			status = -EAGAIN;
-		}
-		if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
+		switch (status) {
+		case 0:
+			break;
+		case -EACCES:	/* the user hasn't set up machine creds */
+			if (cred == clp->cl_machine_cred) {
+				nfs4_clear_machine_cred(clp);
+				status = -EAGAIN;
+			}
+			break;
+		case -NFS4ERR_CLID_INUSE:
+		case -NFS4ERR_WRONGSEC:
+			/*
+			 * XXX: "flavors" is unordered; the client should
+			 *	prefer krb5p for this transport
+			 */
+			if (i < len && rpcauth_create(flavors[i++],
+						clp->cl_rpcclient) != NULL)
+				goto again;
+			status = -EPERM;
+			break;
+		case -NFS4ERR_MINOR_VERS_MISMATCH:
 			status = -EPROTONOSUPPORT;
+			break;
+		}
 	}
 	return status;
 }
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index b246582..1c4c174 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -65,10 +65,9 @@ struct nfs_client {
 	struct idmap *		cl_idmap;
 
 	/* Our own IP address, as a null-terminated string.
-	 * This is used to generate the clientid, and the callback address.
+	 * This is used to generate the mv0 callback address.
 	 */
 	char			cl_ipaddr[48];
-	unsigned char		cl_id_uniquifier;
 	u32			cl_cb_ident;	/* v4.0 callback identifier */
 	const struct nfs4_minor_version_ops *cl_mvops;
 


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 14/20] NFS: Add nfs_client behavior flags
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (11 preceding siblings ...)
  2012-04-23 20:55 ` [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE Chuck Lever
@ 2012-04-23 20:55 ` Chuck Lever
  2012-04-23 20:55 ` [PATCH 15/20] NFS: Introduce "migration" mount option Chuck Lever
                   ` (5 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:55 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

"noresvport" and "discrtry" can be passed to nfs_create_rpc_client()
by setting flags in the passed-in nfs_client.  This change makes it
easy to add new flags.

Note that these settings are now "sticky" over the lifetime of a
struct nfs_client, and may even be copied when an nfs_client is
cloned.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c           |   40 ++++++++++++++++++++--------------------
 fs/nfs/internal.h         |    6 ++----
 include/linux/nfs_fs_sb.h |    3 +++
 include/linux/nfs_xdr.h   |    2 +-
 4 files changed, 26 insertions(+), 25 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index d62f7e4..76dec6a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -129,6 +129,7 @@ const struct rpc_program nfsacl_program = {
 #endif  /* CONFIG_NFS_V3_ACL */
 
 struct nfs_client_initdata {
+	unsigned long init_flags;
 	const char *hostname;
 	const struct sockaddr *addr;
 	size_t addrlen;
@@ -544,8 +545,7 @@ static struct nfs_client *
 nfs_get_client(const struct nfs_client_initdata *cl_init,
 	       const struct rpc_timeout *timeparms,
 	       const char *ip_addr,
-	       rpc_authflavor_t authflavour,
-	       int noresvport)
+	       rpc_authflavor_t authflavour)
 {
 	struct nfs_client *clp, *new = NULL;
 	struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
@@ -567,9 +567,10 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
 		if (new) {
 			list_add(&new->cl_share_link, &nn->nfs_client_list);
 			spin_unlock(&nn->nfs_client_lock);
+			new->cl_flags = cl_init->init_flags;
 			return cl_init->rpc_ops->init_client(new,
 						timeparms, ip_addr,
-						authflavour, noresvport);
+						authflavour);
 		}
 
 		spin_unlock(&nn->nfs_client_lock);
@@ -653,8 +654,7 @@ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
  */
 static int nfs_create_rpc_client(struct nfs_client *clp,
 				 const struct rpc_timeout *timeparms,
-				 rpc_authflavor_t flavor,
-				 int discrtry, int noresvport)
+				 rpc_authflavor_t flavor)
 {
 	struct rpc_clnt		*clnt = NULL;
 	struct rpc_create_args args = {
@@ -669,9 +669,9 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
 		.authflavor	= flavor,
 	};
 
-	if (discrtry)
+	if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
 		args.flags |= RPC_CLNT_CREATE_DISCRTRY;
-	if (noresvport)
+	if (test_bit(NFS_CS_NORESVPORT, &clp->cl_flags))
 		args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
 
 	if (!IS_ERR(clp->cl_rpcclient))
@@ -809,8 +809,7 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
  */
 struct nfs_client *nfs_init_client(struct nfs_client *clp,
 		    const struct rpc_timeout *timeparms,
-		    const char *ip_addr, rpc_authflavor_t authflavour,
-		    int noresvport)
+		    const char *ip_addr, rpc_authflavor_t authflavour)
 {
 	int error;
 
@@ -824,8 +823,7 @@ struct nfs_client *nfs_init_client(struct nfs_client *clp,
 	 * Create a client RPC handle for doing FSSTAT with UNIX auth only
 	 * - RFC 2623, sec 2.3.2
 	 */
-	error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX,
-				      0, noresvport);
+	error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
 	if (error < 0)
 		goto error;
 	nfs_mark_client_ready(clp, NFS_CS_READY);
@@ -865,10 +863,11 @@ static int nfs_init_server(struct nfs_server *server,
 
 	nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
 			data->timeo, data->retrans);
+	if (data->flags & NFS_MOUNT_NORESVPORT)
+		set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
 
 	/* Allocate or find a client reference we can use */
-	clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX,
-			     data->flags & NFS_MOUNT_NORESVPORT);
+	clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX);
 	if (IS_ERR(clp)) {
 		dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
 		return PTR_ERR(clp);
@@ -1347,8 +1346,7 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
 struct nfs_client *nfs4_init_client(struct nfs_client *clp,
 				    const struct rpc_timeout *timeparms,
 				    const char *ip_addr,
-				    rpc_authflavor_t authflavour,
-				    int noresvport)
+				    rpc_authflavor_t authflavour)
 {
 	char buf[INET6_ADDRSTRLEN + 1];
 	int error;
@@ -1362,8 +1360,8 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
 	/* Check NFS protocol revision and initialize RPC op vector */
 	clp->rpc_ops = &nfs_v4_clientops;
 
-	error = nfs_create_rpc_client(clp, timeparms, authflavour,
-				      1, noresvport);
+	__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+	error = nfs_create_rpc_client(clp, timeparms, authflavour);
 	if (error < 0)
 		goto error;
 
@@ -1431,9 +1429,11 @@ static int nfs4_set_client(struct nfs_server *server,
 
 	dprintk("--> nfs4_set_client()\n");
 
+	if (server->flags & NFS_MOUNT_NORESVPORT)
+		set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+
 	/* Allocate or find a client reference we can use */
-	clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour,
-			     server->flags & NFS_MOUNT_NORESVPORT);
+	clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour);
 	if (IS_ERR(clp)) {
 		error = PTR_ERR(clp);
 		goto error;
@@ -1492,7 +1492,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
 	 * (section 13.1 RFC 5661).
 	 */
 	clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
-			     mds_clp->cl_rpcclient->cl_auth->au_flavor, 0);
+			     mds_clp->cl_rpcclient->cl_auth->au_flavor);
 
 	dprintk("<-- %s %p\n", __func__, clp);
 	return clp;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index f1021c8..315dc86 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -157,13 +157,11 @@ nfs4_find_client_sessionid(struct net *, const struct sockaddr *,
 extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
 				const struct rpc_timeout *timeparms,
 				const char *ip_addr,
-				rpc_authflavor_t authflavour,
-				int noresvport);
+				rpc_authflavor_t authflavour);
 extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
 				const struct rpc_timeout *timeparms,
 				const char *ip_addr,
-				rpc_authflavor_t authflavour,
-				int noresvport);
+				rpc_authflavor_t authflavour);
 extern struct nfs_server *nfs_create_server(
 					const struct nfs_parsed_mount_data *,
 					struct nfs_fh *);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1c4c174..6f7a93c 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -35,6 +35,9 @@ struct nfs_client {
 #define NFS_CS_RENEWD		3		/* - renewd started */
 #define NFS_CS_STOP_RENEW	4		/* no more state to renew */
 #define NFS_CS_CHECK_LEASE_TIME	5		/* need to check lease time */
+	unsigned long		cl_flags;	/* behavior switches */
+#define NFS_CS_NORESVPORT	0		/* - use ephemeral src port */
+#define NFS_CS_DISCRTRY		1		/* - disconnect on RPC retry */
 	struct sockaddr_storage	cl_addr;	/* server identifier */
 	size_t			cl_addrlen;
 	char *			cl_hostname;	/* hostname of server */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7437931..1fbca8b 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1286,7 +1286,7 @@ struct nfs_rpc_ops {
 				struct iattr *iattr);
 	struct nfs_client *
 		(*init_client) (struct nfs_client *, const struct rpc_timeout *,
-				const char *, rpc_authflavor_t, int);
+				const char *, rpc_authflavor_t);
 	int	(*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
 };
 


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 15/20] NFS: Introduce "migration" mount option
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (12 preceding siblings ...)
  2012-04-23 20:55 ` [PATCH 14/20] NFS: Add nfs_client behavior flags Chuck Lever
@ 2012-04-23 20:55 ` Chuck Lever
  2012-04-23 20:55 ` [PATCH 16/20] NFS: Use the same nfs_client_id4 for every server Chuck Lever
                   ` (4 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:55 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Currently, the Linux client uses a distinct nfs_client_id4 string
for each NFS server it identifies itself to.

To support Transparent State Migration, the Linux client will have
to use the same nfs_client_id4 string for all servers it communicates
with (also known as the "uniform client string" model).  Otherwise
NFS servers will not be able to merge NFSv4 state appropriately after
a migration event.

Unfortunately, there are some server features now in the field that
are not compatible with the uniform client string model.

So, by default, our NFSv4.0 mounts will continue to use the current
model, and we will add a mount option that switches them to use
the uniform model.  Client administrators will have to identify
which servers can be mounted with this option in effect.

The first mount of a server controls the behavior for all subsequent
mounts until the client reboots.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c           |    2 ++
 fs/nfs/super.c            |   20 ++++++++++++++++++++
 include/linux/nfs_fs_sb.h |    2 ++
 3 files changed, 24 insertions(+), 0 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 76dec6a..07b106e 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1431,6 +1431,8 @@ static int nfs4_set_client(struct nfs_server *server,
 
 	if (server->flags & NFS_MOUNT_NORESVPORT)
 		set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+	if (server->options & NFS_OPTION_MIGRATION)
+		set_bit(NFS_CS_MIGRATION, &cl_init.init_flags);
 
 	/* Allocate or find a client reference we can use */
 	clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 6decf3c..c169dc3 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -86,6 +86,7 @@ enum {
 	Opt_sharecache, Opt_nosharecache,
 	Opt_resvport, Opt_noresvport,
 	Opt_fscache, Opt_nofscache,
+	Opt_migration, Opt_nomigration,
 
 	/* Mount options that take integer arguments */
 	Opt_port,
@@ -145,6 +146,8 @@ static const match_table_t nfs_mount_option_tokens = {
 	{ Opt_noresvport, "noresvport" },
 	{ Opt_fscache, "fsc" },
 	{ Opt_nofscache, "nofsc" },
+	{ Opt_migration, "migration" },
+	{ Opt_nomigration, "nomigration" },
 
 	{ Opt_port, "port=%s" },
 	{ Opt_rsize, "rsize=%s" },
@@ -724,6 +727,9 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
 	if (nfss->options & NFS_OPTION_FSCACHE)
 		seq_printf(m, ",fsc");
 
+	if (nfss->options & NFS_OPTION_MIGRATION)
+		seq_printf(m, ",migration");
+
 	if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
 		if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
 			seq_printf(m, ",lookupcache=none");
@@ -1286,6 +1292,12 @@ static int nfs_parse_mount_options(char *raw,
 			kfree(mnt->fscache_uniq);
 			mnt->fscache_uniq = NULL;
 			break;
+		case Opt_migration:
+			mnt->options |= NFS_OPTION_MIGRATION;
+			break;
+		case Opt_nomigration:
+			mnt->options &= ~NFS_OPTION_MIGRATION;
+			break;
 
 		/*
 		 * options that take numeric values
@@ -1578,6 +1590,10 @@ static int nfs_parse_mount_options(char *raw,
 	if (mnt->minorversion && mnt->version != 4)
 		goto out_minorversion_mismatch;
 
+	if (mnt->options & NFS_OPTION_MIGRATION &&
+	    (mnt->version != 4 || mnt->minorversion != 0))
+		goto out_migration_misuse;
+
 	/*
 	 * verify that any proto=/mountproto= options match the address
 	 * familiies in the addr=/mountaddr= options.
@@ -1615,6 +1631,10 @@ out_minorversion_mismatch:
 	printk(KERN_INFO "NFS: mount option vers=%u does not support "
 			 "minorversion=%u\n", mnt->version, mnt->minorversion);
 	return 0;
+out_migration_misuse:
+	printk(KERN_INFO
+		"NFS: 'migration' not supported for this NFS version\n");
+	return 0;
 out_nomem:
 	printk(KERN_INFO "NFS: not enough memory to parse option\n");
 	return 0;
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 6f7a93c..1a5a4cc 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -38,6 +38,7 @@ struct nfs_client {
 	unsigned long		cl_flags;	/* behavior switches */
 #define NFS_CS_NORESVPORT	0		/* - use ephemeral src port */
 #define NFS_CS_DISCRTRY		1		/* - disconnect on RPC retry */
+#define NFS_CS_MIGRATION	2		/* - transparent state migr */
 	struct sockaddr_storage	cl_addr;	/* server identifier */
 	size_t			cl_addrlen;
 	char *			cl_hostname;	/* hostname of server */
@@ -122,6 +123,7 @@ struct nfs_server {
 	unsigned int		namelen;
 	unsigned int		options;	/* extra options enabled by mount */
 #define NFS_OPTION_FSCACHE	0x00000001	/* - local caching enabled */
+#define NFS_OPTION_MIGRATION	0x00000002	/* - NFSv4 migration enabled */
 
 	struct nfs_fsid		fsid;
 	__u64			maxfilesize;	/* maximum file size */


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 16/20] NFS: Use the same nfs_client_id4 for every server
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (13 preceding siblings ...)
  2012-04-23 20:55 ` [PATCH 15/20] NFS: Introduce "migration" mount option Chuck Lever
@ 2012-04-23 20:55 ` Chuck Lever
  2012-04-23 20:55 ` [PATCH 17/20] NFS: EXCHANGE_ID should save the server major and minor ID Chuck Lever
                   ` (3 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:55 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Currently, when identifying itself to NFS servers, the Linux NFS
client uses a unique nfs_client_id4 string for each server IP address
it talks with.  For example, when client A talks to server X, the
client identifies itself using a string like "AX".

These strings are opaque to servers.  Each client is free to choose
any content, as long as it is unique from other client instances.  A
server must not parse the contents of the string, it can only test
these strings for equality.  These requirements are specified in
detail by RFC 3530 (and bis).

This form of client identification presents a problem for Transparent
State Migration.  When client A's state on server X is migrated to
server Y, it continues to be associated with string "AX."  But client
A will present string "AY" when communicating with server Y.

Server Y thus has no way to know that client A should be associated
with the state migrated from server X.  "AX" is all but abandoned,
interfering with establishing fresh state for A on server Y.

To support Transparent State Migration, then, NFSv4.0 clients must
instead use the same nfs_client_id4 string to identify themselves to
every NFS server; something like "A".

As part of a migration event, when state associated with string "A"
shows up at server Y, client A identifies itself as "A" and server Y
will know immediately that the state associated with "A," whether it
is native or migrated, is owned by client A.

As a pre-requisite to adding support for NFSv4 migration to the Linux
NFS client, this patch changes the way Linux identifies itself to NFS
servers via the SETCLIENTID (NFSv4 minor version 0) and EXCHANGE_ID
(NFSv4 minor version 1) operations.

In addition to removing the server's IP address from nfs_client_id4,
the Linux NFS client will also no longer use its own source IP address
as part of the nfs_client_id4 string.  On multi-homed clients, the
value of this address depends on the address family and network
routing used to contact the server, thus it can be different for each
server.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/nfs4proc.c |   50 ++++++++++++++++++++++++++++++++++++++++----------
 1 files changed, 40 insertions(+), 10 deletions(-)

diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7ec1b68..9fe19d4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3908,6 +3908,34 @@ nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
 	return result;
 }
 
+/*
+ * XXX:
+ *
+ * Our client ID string should not use init_utsname->nodename, but
+ * rather should contain the nodename for the appropriate net
+ * namespace for this nfs_client.
+ *
+ * There's a pointer to the correct net namespace in the rpc_clnt's
+ * xprt, but I have no idea how then to get the net namespace's nodename.
+ *
+ * However, rpc_create() should be using that namespace to construct
+ * ->cl_nodename, which is exactly the string we need...  but it is
+ * currently still using init_utsname().
+ *
+ * For now I'm going to stick with init_utsname.  We can easily work
+ * this out later when the client has container support.
+ */
+static unsigned int
+nfs4_init_uniform_client_string(const struct nfs_client *clp,
+				char *buf, size_t len)
+{
+	return scnprintf(buf, len, "Linux NFSv%u.%u %s %s %s uniform",
+				clp->rpc_ops->version, clp->cl_minorversion,
+				init_utsname()->nodename,
+				init_utsname()->domainname,
+				clp->cl_rpcclient->cl_auth->au_ops->au_name);
+}
+
 /**
  * nfs4_proc_setclientid - Negotiate client ID
  * @clp: state data structure
@@ -3941,7 +3969,14 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
 
 	/* Client ID */
 	nfs4_init_boot_verifier(clp, &sc_verifier);
-	setclientid.sc_name_len = nfs4_init_nonuniform_client_string(clp,
+	if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
+		setclientid.sc_name_len =
+				nfs4_init_uniform_client_string(clp,
+						setclientid.sc_name,
+						sizeof(setclientid.sc_name));
+	else
+		setclientid.sc_name_len =
+				nfs4_init_nonuniform_client_string(clp,
 						setclientid.sc_name,
 						sizeof(setclientid.sc_name));
 
@@ -5061,14 +5096,8 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 	BUG_ON(clp == NULL);
 
 	nfs4_init_boot_verifier(clp, &verifier);
-
-	args.id_len = scnprintf(args.id, sizeof(args.id),
-				"%s/%s.%s/%u",
-				clp->cl_ipaddr,
-				init_utsname()->nodename,
-				init_utsname()->domainname,
-				clp->cl_rpcclient->cl_auth->au_flavor);
-
+	args.id_len = nfs4_init_uniform_client_string(clp, args.id,
+							sizeof(args.id));
 	res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
 					GFP_KERNEL);
 	if (unlikely(res.server_scope == NULL)) {
@@ -5119,7 +5148,8 @@ out:
 			__func__, clp->cl_implid->domain, clp->cl_implid->name,
 			clp->cl_implid->date.seconds,
 			clp->cl_implid->date.nseconds);
-	dprintk("<-- %s status= %d\n", __func__, status);
+	dprintk("<-- %s nfs_client_id4 '%.*s' (status %d)\n",
+		__func__, args.id_len, args.id, status);
 	return status;
 }
 


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 17/20] NFS: EXCHANGE_ID should save the server major and minor ID
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (14 preceding siblings ...)
  2012-04-23 20:55 ` [PATCH 16/20] NFS: Use the same nfs_client_id4 for every server Chuck Lever
@ 2012-04-23 20:55 ` Chuck Lever
  2012-04-23 20:55 ` [PATCH 18/20] NFS: Detect NFSv4 server trunking when mounting Chuck Lever
                   ` (2 subsequent siblings)
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:55 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

To detect server trunking, we need to have the server major and minor
ID available in each server's struct nfs_client.  Currently these are
being discarded in the XDR decoder for EXCHANGE_ID.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c           |    1 +
 fs/nfs/nfs4proc.c         |   18 +++++++++++++++++-
 fs/nfs/nfs4xdr.c          |   13 ++++++++-----
 include/linux/nfs_fs_sb.h |    1 +
 include/linux/nfs_xdr.h   |    3 ++-
 5 files changed, 29 insertions(+), 7 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 07b106e..920abbc 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -235,6 +235,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
 		nfs_idmap_delete(clp);
 
 	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
+	kfree(clp->cl_serverowner);
 	kfree(clp->cl_serverscope);
 	kfree(clp->cl_implid);
 }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 9fe19d4..3fd9944 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -5098,11 +5098,19 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 	nfs4_init_boot_verifier(clp, &verifier);
 	args.id_len = nfs4_init_uniform_client_string(clp, args.id,
 							sizeof(args.id));
+
+	res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
+					GFP_KERNEL);
+	if (unlikely(res.server_owner == NULL)) {
+		status = -ENOMEM;
+		goto out;
+	}
+
 	res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
 					GFP_KERNEL);
 	if (unlikely(res.server_scope == NULL)) {
 		status = -ENOMEM;
-		goto out;
+		goto out_server_owner;
 	}
 
 	res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
@@ -5116,6 +5124,12 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 		status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
 
 	if (status == NFS4_OK) {
+		kfree(clp->cl_serverowner);
+		clp->cl_serverowner = res.server_owner;
+		res.server_owner = NULL;
+	}
+
+	if (status == NFS4_OK) {
 		/* use the most recent implementation id */
 		kfree(clp->cl_implid);
 		clp->cl_implid = res.impl_id;
@@ -5139,6 +5153,8 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
 		}
 	}
 
+out_server_owner:
+	kfree(res.server_owner);
 out_server_scope:
 	kfree(res.server_scope);
 out:
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 312f619..3cf7519 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5163,24 +5163,27 @@ static int decode_exchange_id(struct xdr_stream *xdr,
 	if (dummy != SP4_NONE)
 		return -EIO;
 
-	/* Throw away minor_id */
+	/* server_owner4.so_minor_id */
 	p = xdr_inline_decode(xdr, 8);
 	if (unlikely(!p))
 		goto out_overflow;
+	p = xdr_decode_hyper(p, &res->server_owner->minor_id);
 
-	/* Throw away Major id */
+	/* server_owner4.so_major_id */
 	status = decode_opaque_inline(xdr, &dummy, &dummy_str);
 	if (unlikely(status))
 		return status;
+	if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
+		return -EIO;
+	memcpy(res->server_owner->major_id, dummy_str, dummy);
+	res->server_owner->major_id_sz = dummy;
 
-	/* Save server_scope */
+	/* server_scope4 */
 	status = decode_opaque_inline(xdr, &dummy, &dummy_str);
 	if (unlikely(status))
 		return status;
-
 	if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
 		return -EIO;
-
 	memcpy(res->server_scope->server_scope, dummy_str, dummy);
 	res->server_scope->server_scope_sz = dummy;
 
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1a5a4cc..597ada7 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -80,6 +80,7 @@ struct nfs_client {
 	/* The flags used for obtaining the clientid during EXCHANGE_ID */
 	u32			cl_exchange_flags;
 	struct nfs4_session	*cl_session;	/* shared session */
+	struct nfs41_server_owner *cl_serverowner;
 	struct nfs41_server_scope *cl_serverscope;
 	struct nfs41_impl_id	*cl_implid;
 #endif /* CONFIG_NFS_V4 */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 1fbca8b..6c4a856 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1067,7 +1067,7 @@ struct nfs41_exchange_id_args {
 	u32				flags;
 };
 
-struct server_owner {
+struct nfs41_server_owner {
 	uint64_t			minor_id;
 	uint32_t			major_id_sz;
 	char				major_id[NFS4_OPAQUE_LIMIT];
@@ -1087,6 +1087,7 @@ struct nfs41_impl_id {
 struct nfs41_exchange_id_res {
 	struct nfs_client		*client;
 	u32				flags;
+	struct nfs41_server_owner	*server_owner;
 	struct nfs41_server_scope	*server_scope;
 	struct nfs41_impl_id		*impl_id;
 };


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 18/20] NFS: Detect NFSv4 server trunking when mounting
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (15 preceding siblings ...)
  2012-04-23 20:55 ` [PATCH 17/20] NFS: EXCHANGE_ID should save the server major and minor ID Chuck Lever
@ 2012-04-23 20:55 ` Chuck Lever
  2012-04-23 21:27   ` Myklebust, Trond
  2012-04-23 20:56 ` [PATCH 19/20] NFS: Add nfs4_unique_id boot parameter Chuck Lever
  2012-04-23 20:56 ` [PATCH 20/20] NFS: Clean up debugging messages in fs/nfs/client.c Chuck Lever
  18 siblings, 1 reply; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:55 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Currently the Linux NFS client waits to perform a SETCLIENTID until
just before an application wants to open a file.  Quite a bit of
activity can occur before any state is needed.

If the client cares about server trunking, however, no NFSv4
operations can proceed until the client determines who it is talking
to.  Thus server IP trunking detection must be done when the client
first encounters an unfamiliar server IP address.

The nfs_get_client() function walks the nfs_client_list and matches on
server IP address.  The outcome of that walk tells us immediately if
we have an unfamiliar server IP address.  It invokes an init_client()
method in this case.

Thus, nfs4_init_client() can establish a fresh client ID, and perform
trunking detection with it.  The exact process for detecting trunking
is different for NFSv4.0 and NFSv4.1, so a minorversion-specific
init_client callout is introduced.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c    |  223 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 fs/nfs/internal.h  |    6 +
 fs/nfs/nfs4_fs.h   |    7 ++
 fs/nfs/nfs4proc.c  |    2 
 fs/nfs/nfs4state.c |  131 ++++++++++++++++++++++++++++++-
 5 files changed, 367 insertions(+), 2 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 920abbc..7330673 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -566,7 +566,8 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
 			return nfs_found_client(cl_init, clp);
 		}
 		if (new) {
-			list_add(&new->cl_share_link, &nn->nfs_client_list);
+			list_add_tail(&new->cl_share_link,
+					&nn->nfs_client_list);
 			spin_unlock(&nn->nfs_client_lock);
 			new->cl_flags = cl_init->init_flags;
 			return cl_init->rpc_ops->init_client(new,
@@ -584,6 +585,210 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
 	return new;
 }
 
+#ifdef CONFIG_NFS_V4
+/*
+ * Returns true if the client IDs match
+ */
+static bool
+nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
+{
+	if (a->cl_clientid != b->cl_clientid) {
+		dprintk("NFS: --> %s client ID %llx does not match %llx\n",
+			__func__, a->cl_clientid, b->cl_clientid);
+		return false;
+	}
+	dprintk("NFS: --> %s client ID %llx matches %llx\n",
+		__func__, a->cl_clientid, b->cl_clientid);
+	return true;
+}
+
+/**
+ * nfs40_walk_client_list - Find server that recognizes a client ID
+ *
+ * @new: nfs_client with client ID to test
+ * @result: OUT: found nfs_client, or new
+ * @cred: credential to use for trunking test
+ *
+ * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status.
+ * If NFS4_OK is returned, an nfs_client pointer is planted in "result."
+ *
+ * NB: nfs40_walk_client_list() relies on the new nfs_client being
+ *     the last nfs_client on the list.
+ */
+int nfs40_walk_client_list(struct nfs_client *new,
+			   struct nfs_client **result,
+			   struct rpc_cred *cred)
+{
+	struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
+	struct nfs_client *pos, *prev = NULL;
+	struct nfs4_setclientid_res clid = {
+		.clientid	= new->cl_clientid,
+		.confirm	= new->cl_confirm,
+	};
+	int status;
+
+	dprintk("NFS: --> %s nfs_client = %p\n", __func__, new);
+
+	spin_lock(&nn->nfs_client_lock);
+
+	list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
+		if (pos->cl_cons_state < 0)
+			continue;
+
+		if (pos->rpc_ops != new->rpc_ops)
+			continue;
+
+		if (pos->cl_proto != new->cl_proto)
+			continue;
+
+		if (pos->cl_minorversion != new->cl_minorversion)
+			continue;
+
+		dprintk("NFS: --> %s comparing %llx and %llx\n", __func__,
+			new->cl_clientid, pos->cl_clientid);
+		if (pos->cl_clientid != new->cl_clientid)
+			continue;
+
+		atomic_inc(&pos->cl_count);
+		dprintk("%s nfs_client = %p ({%d})\n",
+			__func__, pos, atomic_read(&pos->cl_count));
+		spin_unlock(&nn->nfs_client_lock);
+
+		dprintk("NFS: --> %s confirming %llx\n",
+			__func__, new->cl_clientid);
+
+		if (prev)
+			nfs_put_client(prev);
+
+		status = nfs4_proc_setclientid_confirm(pos, &clid, cred);
+		if (status == NFS4_OK) {
+			/* The new nfs_client doesn't need the extra
+			 * cl_count bump. */
+			nfs_put_client(pos);
+			*result = pos;
+			dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
+				__func__, pos, atomic_read(&pos->cl_count));
+			return NFS4_OK;
+		}
+		if (status != -NFS4ERR_STALE_CLIENTID) {
+			nfs_put_client(pos);
+			dprintk("NFS: <-- %s status = %d, no result\n",
+				__func__, status);
+			return status;
+		}
+
+		spin_lock(&nn->nfs_client_lock);
+		prev = pos;
+	}
+
+	/*
+	 * No matching nfs_client found.  This should be impossible,
+	 * because the new nfs_client has already been added to
+	 * nfs_client_list by nfs_get_client().
+	 *
+	 * Don't BUG(), since the caller is holding a mutex.
+	 */
+	spin_unlock(&nn->nfs_client_lock);
+	printk(KERN_ERR "NFS: %s Error: no matching nfs_client found\n",
+		__func__);
+	return -NFS4ERR_STALE_CLIENTID;
+}
+
+#ifdef CONFIG_NFS_V4_1
+/*
+ * Returns true if the server owners match
+ */
+static bool
+nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
+{
+	struct nfs41_server_owner *o1 = a->cl_serverowner;
+	struct nfs41_server_owner *o2 = b->cl_serverowner;
+
+	if (o1->minor_id != o2->minor_id) {
+		dprintk("NFS: --> %s server owner minor IDs do not match\n",
+			__func__);
+		return false;
+	}
+
+	if (o1->major_id_sz != o2->major_id_sz)
+		goto out_major_mismatch;
+	if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
+		goto out_major_mismatch;
+
+	dprintk("NFS: --> %s server owners match\n", __func__);
+	return true;
+
+out_major_mismatch:
+	dprintk("NFS: --> %s server owner major IDs do not match\n",
+		__func__);
+	return false;
+}
+
+/**
+ * nfs41_walk_client_list - Find nfs_client that matches a client/server owner
+ *
+ * @new: nfs_client with client ID to test
+ * @result: OUT: found nfs_client, or new
+ * @cred: credential to use for trunking test
+ *
+ * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status.
+ * If NFS4_OK is returned, an nfs_client pointer is planted in "result."
+ *
+ * NB: nfs41_walk_client_list() relies on the new nfs_client being
+ *     the last nfs_client on the list.
+ */
+int nfs41_walk_client_list(struct nfs_client *new,
+			   struct nfs_client **result,
+			   struct rpc_cred *cred)
+{
+	struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
+	struct nfs_client *pos;
+
+	dprintk("NFS: --> %s nfs_client = %p\n", __func__, new);
+
+	spin_lock(&nn->nfs_client_lock);
+
+	list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
+		if (pos->cl_cons_state < 0)
+			continue;
+
+		if (pos->rpc_ops != new->rpc_ops)
+			continue;
+
+		if (pos->cl_proto != new->cl_proto)
+			continue;
+
+		if (pos->cl_minorversion != new->cl_minorversion)
+			continue;
+
+		if (!nfs4_match_clientids(pos, new))
+			continue;
+
+		if (!nfs4_match_serverowners(pos, new))
+			continue;
+
+		atomic_inc(&pos->cl_count);
+		*result = pos;
+		dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
+			__func__, pos, atomic_read(&pos->cl_count));
+		return NFS4_OK;
+	}
+
+	/*
+	 * No matching nfs_client found.  This should be impossible,
+	 * because the new nfs_client has already been added to
+	 * nfs_client_list by nfs_get_client().
+	 *
+	 * Don't BUG(), since the caller is holding a mutex.
+	 */
+	spin_unlock(&nn->nfs_client_lock);
+	printk(KERN_ERR "NFS: %s Error: no matching nfs_client found\n",
+		__func__);
+	return -NFS4ERR_STALE_CLIENTID;
+}
+#endif	/* CONFIG_NFS_V4_1 */
+#endif	/* CONFIG_NFS_V4 */
+
 /*
  * Mark a server as ready or failed
  */
@@ -1350,6 +1555,7 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
 				    rpc_authflavor_t authflavour)
 {
 	char buf[INET6_ADDRSTRLEN + 1];
+	struct nfs_client *old;
 	int error;
 
 	if (clp->cl_cons_state == NFS_CS_READY) {
@@ -1395,6 +1601,21 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
 
 	if (!nfs4_has_session(clp))
 		nfs_mark_client_ready(clp, NFS_CS_READY);
+
+	error = nfs4_detect_trunking(clp, &old);
+	if (error < 0)
+		goto error;
+	if (clp != old) {
+		nfs_mark_client_ready(clp, NFS_CS_READY);
+		nfs_put_client(clp);
+		dprintk("<-- %s() returning %p instead of %p\n",
+			__func__, old, clp);
+		clp = old;
+		atomic_inc(&clp->cl_count);
+		dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
+			__func__, clp, atomic_read(&clp->cl_count));
+	}
+
 	return clp;
 
 error:
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 315dc86..85888f6 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -162,6 +162,12 @@ extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
 				const struct rpc_timeout *timeparms,
 				const char *ip_addr,
 				rpc_authflavor_t authflavour);
+extern int nfs40_walk_client_list(struct nfs_client *clp,
+				struct nfs_client **result,
+				struct rpc_cred *cred);
+extern int nfs41_walk_client_list(struct nfs_client *clp,
+				struct nfs_client **result,
+				struct rpc_cred *cred);
 extern struct nfs_server *nfs_create_server(
 					const struct nfs_parsed_mount_data *,
 					struct nfs_fh *);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 2953f2c..ba13986 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -190,6 +190,8 @@ struct nfs4_state_recovery_ops {
 	int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
 	struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
 	int (*reclaim_complete)(struct nfs_client *);
+	int (*detect_trunking)(struct nfs_client *, struct nfs_client **,
+		struct rpc_cred *);
 };
 
 struct nfs4_state_maintenance_ops {
@@ -297,9 +299,14 @@ extern void nfs4_renew_state(struct work_struct *);
 /* nfs4state.c */
 struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp);
 struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
+int nfs4_detect_trunking(struct nfs_client *clp, struct nfs_client **);
+int nfs40_detect_trunking(struct nfs_client *clp, struct nfs_client **,
+			struct rpc_cred *);
 #if defined(CONFIG_NFS_V4_1)
 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
 struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
+int nfs41_detect_trunking(struct nfs_client *clp, struct nfs_client **,
+			struct rpc_cred *);
 extern void nfs4_schedule_session_recovery(struct nfs4_session *);
 #else
 static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 3fd9944..00b5d02 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6453,6 +6453,7 @@ static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
 	.recover_lock	= nfs4_lock_reclaim,
 	.establish_clid = nfs4_init_clientid,
 	.get_clid_cred	= nfs4_get_setclientid_cred,
+	.detect_trunking = nfs40_detect_trunking,
 };
 
 #if defined(CONFIG_NFS_V4_1)
@@ -6464,6 +6465,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
 	.establish_clid = nfs41_init_clientid,
 	.get_clid_cred	= nfs4_get_exchange_id_cred,
 	.reclaim_complete = nfs41_proc_reclaim_complete,
+	.detect_trunking = nfs41_detect_trunking,
 };
 #endif /* CONFIG_NFS_V4_1 */
 
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 6a1a305..df59951 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -57,10 +57,12 @@
 #include "internal.h"
 #include "pnfs.h"
 
+#define NFSDBG_FACILITY		NFSDBG_CLIENT
+
 #define OPENOWNER_POOL_SIZE	8
 
 const nfs4_stateid zero_stateid;
-
+static DEFINE_MUTEX(nfs_clid_init_mutex);
 static LIST_HEAD(nfs4_clientid_list);
 
 int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
@@ -94,6 +96,47 @@ out:
 	return status;
 }
 
+/**
+ * nfs40_detect_trunking - Detect server IP address trunking (mv0)
+ *
+ * @clp: nfs_client under test
+ * @result: OUT: found nfs_client, or clp
+ * @cred: credential to use for trunking test
+ *
+ * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status.
+ * If NFS4_OK is returned, an nfs_client pointer is planted in
+ * "result".
+ */
+int nfs40_detect_trunking(struct nfs_client *clp, struct nfs_client **result,
+			  struct rpc_cred *cred)
+{
+	struct nfs4_setclientid_res clid = {
+		.clientid = clp->cl_clientid,
+		.confirm = clp->cl_confirm,
+	};
+	unsigned short port;
+	int status;
+
+	port = nfs_callback_tcpport;
+	if (clp->cl_addr.ss_family == AF_INET6)
+		port = nfs_callback_tcpport6;
+
+	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
+	if (status != NFS4_OK)
+		goto out;
+	clp->cl_clientid = clid.clientid;
+	clp->cl_confirm = clid.confirm;
+
+	status = nfs40_walk_client_list(clp, result, cred);
+	if (status != NFS4_OK) {
+		set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+		nfs4_schedule_state_renewal(*result);
+	}
+
+out:
+	return status;
+}
+
 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
 {
 	struct rpc_cred *cred = NULL;
@@ -264,6 +307,44 @@ out:
 	return status;
 }
 
+/**
+ * nfs41_detect_trunking - Detect server IP address trunking (mv1)
+ *
+ * @clp: nfs_client under test
+ * @result: OUT: found nfs_client, or clp
+ * @cred: credential to use for trunking test
+ *
+ * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status.
+ * If NFS4_OK is returned, an nfs_client pointer is planted in
+ * "result".
+ */
+int nfs41_detect_trunking(struct nfs_client *clp, struct nfs_client **result,
+			  struct rpc_cred *cred)
+{
+	struct nfs_client *trunked;
+	int status;
+
+	nfs4_begin_drain_session(clp);
+	status = nfs4_proc_exchange_id(clp, cred);
+	if (status != NFS4_OK)
+		goto out;
+
+	status = nfs41_walk_client_list(clp, &trunked, cred);
+	if (status != NFS4_OK)
+		goto out;
+
+	set_bit(NFS4CLNT_LEASE_CONFIRM, &trunked->cl_state);
+	status = nfs4_proc_create_session(trunked);
+	if (status != NFS4_OK)
+		goto out;
+	clear_bit(NFS4CLNT_LEASE_CONFIRM, &trunked->cl_state);
+	nfs41_setup_state_renewal(trunked);
+	nfs_mark_client_ready(trunked, NFS_CS_READY);
+	*result = trunked;
+out:
+	return status;
+}
+
 struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
 {
 	struct rpc_cred *cred;
@@ -1579,6 +1660,8 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
 	rpc_authflavor_t flavors[NFS_MAX_SECFLAVORS];
 	int i, len, status;
 
+	mutex_lock(&nfs_clid_init_mutex);
+
 	i = 0;
 	len = gss_mech_list_pseudoflavors(flavors);
 
@@ -1613,6 +1696,52 @@ again:
 			break;
 		}
 	}
+
+	mutex_unlock(&nfs_clid_init_mutex);
+	return status;
+}
+
+/**
+ * nfs4_detect_trunking - Detect server IP address trunking
+ *
+ * @clp: nfs_client under test
+ * @result: OUT: found nfs_client, or clp
+ *
+ * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status.
+ * If NFS4_OK is returned, an nfs_client pointer is planted in
+ * "result".
+ */
+int nfs4_detect_trunking(struct nfs_client *clp,
+			 struct nfs_client **result)
+{
+	const struct nfs4_state_recovery_ops *ops =
+				clp->cl_mvops->reboot_recovery_ops;
+	struct rpc_cred *cred;
+	int status;
+
+	dprintk("NFS: <-- %s nfs_client = %p\n", __func__, clp);
+	mutex_lock(&nfs_clid_init_mutex);
+
+	status = -ENOENT;
+	cred = ops->get_clid_cred(clp);
+	if (cred != NULL) {
+		status = ops->detect_trunking(clp, result, cred);
+		put_rpccred(cred);
+		/* Handle case where the user hasn't set up machine creds */
+		if (status == -EACCES && cred == clp->cl_machine_cred) {
+			nfs4_clear_machine_cred(clp);
+			status = -EAGAIN;
+		}
+		if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
+			status = -EPROTONOSUPPORT;
+	}
+
+	mutex_unlock(&nfs_clid_init_mutex);
+	if (status == NFS4_OK) {
+		clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+		dprintk("NFS: <-- %s result = %p\n", __func__, *result);
+	} else
+		dprintk("NFS: <-- %s status = %d\n", __func__, status);
 	return status;
 }
 


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 19/20] NFS: Add nfs4_unique_id boot parameter
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (16 preceding siblings ...)
  2012-04-23 20:55 ` [PATCH 18/20] NFS: Detect NFSv4 server trunking when mounting Chuck Lever
@ 2012-04-23 20:56 ` Chuck Lever
  2012-04-23 20:56 ` [PATCH 20/20] NFS: Clean up debugging messages in fs/nfs/client.c Chuck Lever
  18 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:56 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

An optional boot parameter is introduced to allow client
administrators to specify a string that the Linux NFS client can
insert into its nfs_client_id4 string, to make it both more globally
unique and more fixed.

If this boot parameter is not specified, the client's nodename is
used, as before.

Client installation procedures can create a unique string (typically,
a UUID) which remains unchanged during the lifetime of that client
instance.  This works just like creating a UUID for the label of the
system's root and boot volumes.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 Documentation/filesystems/nfs/nfs.txt |   44 +++++++++++++++++++++++++++++++--
 Documentation/kernel-parameters.txt   |    5 ++++
 fs/nfs/nfs4proc.c                     |   17 +++++++++++++
 3 files changed, 63 insertions(+), 3 deletions(-)

diff --git a/Documentation/filesystems/nfs/nfs.txt b/Documentation/filesystems/nfs/nfs.txt
index f50f26c..f2571c8 100644
--- a/Documentation/filesystems/nfs/nfs.txt
+++ b/Documentation/filesystems/nfs/nfs.txt
@@ -12,9 +12,47 @@ and work is in progress on adding support for minor version 1 of the NFSv4
 protocol.
 
 The purpose of this document is to provide information on some of the
-upcall interfaces that are used in order to provide the NFS client with
-some of the information that it requires in order to fully comply with
-the NFS spec.
+special features of the NFS client that can be configured by system
+administrators.
+
+
+The nfs4_unique_id parameter
+============================
+
+NFSv4 requires clients to identify themselves to servers with a unique
+string.  File open and lock state shared between one client and one server
+is associated with this identity.  To support robust NFSv4 state recovery
+and transparent state migration, this identity string must not change
+across client reboots.
+
+Without any other intervention, the Linux client uses a string that contains
+the local system's node name.  System administrators, however, often do not
+take care to ensure that node names are fully qualified and do not change
+over the lifetime of a client system.  Node names can have other
+administrative requirements that require particular behavior that does not
+work well as part of an nfs_client_id4 string.
+
+The nfs.nfs4_unique_id boot parameter specifies a unique string that can be
+used instead of a system's node name when an NFS client identifies itself to
+a server.  Thus, if the system's node name is not unique, or it changes, its
+nfs.nfs4_unique_id stays the same, preventing collision with other clients
+or loss of state during NFS reboot recovery or transparent state migration.
+
+The nfs.nfs4_unique_id string is typically a UUID, though it can contain
+anything that is believed to be unique across all NFS clients.  An
+nfs4_unique_id string should be chosen when a client system is installed,
+just as a system's root file system gets a fresh UUID in its label at
+install time.
+
+The string should remain fixed for the lifetime of the client.  It can be
+changed safely if care is taken that the client shuts down cleanly and all
+outstanding NFSv4 state has expired, to prevent loss of NFSv4 state.
+
+This string can be stored in an NFS client's grub.conf, or it can be provided
+via a net boot facility such as PXE.  It may also be specified as an nfs.ko
+module parameter.  Specifying a uniquifier string is not supported for NFS
+clients running in containers.
+
 
 The DNS resolver
 ================
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c1601e5..096356b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1690,6 +1690,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			will be autodetected by the client, and it will fall
 			back to using the idmapper.
 			To turn off this behaviour, set the value to '0'.
+	nfs.nfs4_unique_id=
+			[NFS4] Specify an additional fixed unique ident-
+			ification string that NFSv4 clients can insert into
+			their nfs_client_id4 string.  This is typically a
+			UUID that is generated at system install time.
 
 	nfs.send_implementation_id =
 			[NFSv4.1] Send client implementation identification
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 00b5d02..d112931 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -75,6 +75,12 @@
 
 static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
 
+#define NFS4_CLIENT_ID_UNIQ_LEN		(64)
+static char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN] = "";
+module_param_string(nfs4_unique_id, nfs4_client_id_uniquifier,
+			NFS4_CLIENT_ID_UNIQ_LEN, 0600);
+MODULE_PARM_DESC(nfs4_unique_id, "nfs_client_id4 uniquifier string");
+
 struct nfs4_opendata;
 static int _nfs4_proc_open(struct nfs4_opendata *data);
 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
@@ -3924,16 +3930,27 @@ nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
  *
  * For now I'm going to stick with init_utsname.  We can easily work
  * this out later when the client has container support.
+ *
+ * Specifying a uniquifier string is not supported for NFS clients
+ * running in containers.
  */
 static unsigned int
 nfs4_init_uniform_client_string(const struct nfs_client *clp,
 				char *buf, size_t len)
 {
+	if (nfs4_client_id_uniquifier[0] != '\0')
+		goto have_uniquifier;
 	return scnprintf(buf, len, "Linux NFSv%u.%u %s %s %s uniform",
 				clp->rpc_ops->version, clp->cl_minorversion,
 				init_utsname()->nodename,
 				init_utsname()->domainname,
 				clp->cl_rpcclient->cl_auth->au_ops->au_name);
+
+have_uniquifier:
+	return scnprintf(buf, len, "Linux NFSv%u.%u %s %s uniform",
+				clp->rpc_ops->version, clp->cl_minorversion,
+				nfs4_client_id_uniquifier,
+				clp->cl_rpcclient->cl_auth->au_ops->au_name);
 }
 
 /**


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 20/20] NFS: Clean up debugging messages in fs/nfs/client.c
  2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
                   ` (17 preceding siblings ...)
  2012-04-23 20:56 ` [PATCH 19/20] NFS: Add nfs4_unique_id boot parameter Chuck Lever
@ 2012-04-23 20:56 ` Chuck Lever
  2012-04-23 21:23   ` Malahal Naineni
  18 siblings, 1 reply; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 20:56 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---

 fs/nfs/client.c |   21 +++++++++++++++++----
 1 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 7330673..beb001b 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -192,6 +192,8 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
 		clp->cl_machine_cred = cred;
 	nfs_fscache_get_client_cookie(clp);
 
+	dprintk("NFS: %s returning new nfs_client = %p ({1})\n",
+		__func__, clp);
 	return clp;
 
 error_cleanup:
@@ -291,7 +293,7 @@ static void pnfs_init_server(struct nfs_server *server)
  */
 static void nfs_free_client(struct nfs_client *clp)
 {
-	dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);
+	dprintk("--> %s destroying nfs_client = %p\n", __func__, clp);
 
 	nfs4_shutdown_client(clp);
 
@@ -308,7 +310,7 @@ static void nfs_free_client(struct nfs_client *clp)
 	kfree(clp->cl_hostname);
 	kfree(clp);
 
-	dprintk("<-- nfs_free_client()\n");
+	dprintk("<-- %s done\n", __func__);
 }
 
 /*
@@ -321,7 +323,8 @@ void nfs_put_client(struct nfs_client *clp)
 	if (!clp)
 		return;
 
-	dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count));
+	dprintk("--> %s nfs_client = %p ({%d})\n",
+		__func__, clp, atomic_read(&clp->cl_count));
 	nn = net_generic(clp->cl_net, nfs_net_id);
 
 	if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
@@ -500,6 +503,8 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
 			continue;
 
 		atomic_inc(&clp->cl_count);
+		dprintk("%s nfs_client = %p ({%d})\n",
+			__func__, clp, atomic_read(&clp->cl_count));
 		return clp;
 	}
 	return NULL;
@@ -1433,8 +1438,11 @@ nfs4_find_client_ident(struct net *net, int cb_ident)
 
 	spin_lock(&nn->nfs_client_lock);
 	clp = idr_find(&nn->cb_ident_idr, cb_ident);
-	if (clp)
+	if (clp) {
+		dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
+			__func__, clp, atomic_read(&clp->cl_count));
 		atomic_inc(&clp->cl_count);
+	}
 	spin_unlock(&nn->nfs_client_lock);
 	return clp;
 }
@@ -1468,6 +1476,8 @@ nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
 			continue;
 
 		atomic_inc(&clp->cl_count);
+		dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
+			__func__, clp, atomic_read(&clp->cl_count));
 		spin_unlock(&nn->nfs_client_lock);
 		return clp;
 	}
@@ -1978,6 +1988,9 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
 	server->nfs_client = source->nfs_client;
 	server->destroy = source->destroy;
 	atomic_inc(&server->nfs_client->cl_count);
+	dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
+		__func__, server->nfs_client,
+		atomic_read(&server->nfs_client->cl_count));
 	nfs_server_copy_userdata(server, source);
 
 	server->fsid = fattr->fsid;


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* Re: [PATCH 05/20] NFS: Clean up return code checking in nfs4_proc_exchange_id()
  2012-04-23 20:53 ` [PATCH 05/20] NFS: Clean up return code checking in nfs4_proc_exchange_id() Chuck Lever
@ 2012-04-23 21:07   ` Myklebust, Trond
  0 siblings, 0 replies; 37+ messages in thread
From: Myklebust, Trond @ 2012-04-23 21:07 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

T24gTW9uLCAyMDEyLTA0LTIzIGF0IDE2OjUzIC0wNDAwLCBDaHVjayBMZXZlciB3cm90ZToNCj4g
Q2xlYW4gdXA6IHByZWZlciB1c2luZyB0aGUgcHJvcGVyIHR5cGVzIGluICJpZiIgZXhwcmVzc2lv
bnMuDQo+IA0KPiBTaWduZWQtb2ZmLWJ5OiBDaHVjayBMZXZlciA8Y2h1Y2subGV2ZXJAb3JhY2xl
LmNvbT4NCj4gLS0tDQo+IA0KPiAgZnMvbmZzL25mczRwcm9jLmMgfCAgIDE2ICsrKysrKysrLS0t
LS0tLS0NCj4gIDEgZmlsZXMgY2hhbmdlZCwgOCBpbnNlcnRpb25zKCspLCA4IGRlbGV0aW9ucygt
KQ0KPiANCj4gZGlmZiAtLWdpdCBhL2ZzL25mcy9uZnM0cHJvYy5jIGIvZnMvbmZzL25mczRwcm9j
LmMNCj4gaW5kZXggMzc3OGU0Zi4uMGFmNjU3ZCAxMDA2NDQNCj4gLS0tIGEvZnMvbmZzL25mczRw
cm9jLmMNCj4gKysrIGIvZnMvbmZzL25mczRwcm9jLmMNCj4gQEAgLTUwMzgsMzAgKzUwMzgsMzAg
QEAgaW50IG5mczRfcHJvY19leGNoYW5nZV9pZChzdHJ1Y3QgbmZzX2NsaWVudCAqY2xwLCBzdHJ1
Y3QgcnBjX2NyZWQgKmNyZWQpDQo+ICANCj4gIAlyZXMuc2VydmVyX3Njb3BlID0ga3phbGxvYyhz
aXplb2Yoc3RydWN0IG5mczQxX3NlcnZlcl9zY29wZSksDQo+ICAJCQkJCUdGUF9LRVJORUwpOw0K
PiAtCWlmICh1bmxpa2VseSghcmVzLnNlcnZlcl9zY29wZSkpIHsNCj4gKwlpZiAodW5saWtlbHko
cmVzLnNlcnZlcl9zY29wZSA9PSBOVUxMKSkgew0KPiAgCQlzdGF0dXMgPSAtRU5PTUVNOw0KPiAg
CQlnb3RvIG91dDsNCj4gIAl9DQo+ICANCj4gIAlyZXMuaW1wbF9pZCA9IGt6YWxsb2Moc2l6ZW9m
KHN0cnVjdCBuZnM0MV9pbXBsX2lkKSwgR0ZQX0tFUk5FTCk7DQo+IC0JaWYgKHVubGlrZWx5KCFy
ZXMuaW1wbF9pZCkpIHsNCj4gKwlpZiAodW5saWtlbHkocmVzLmltcGxfaWQgPT0gTlVMTCkpIHsN
Cj4gIAkJc3RhdHVzID0gLUVOT01FTTsNCj4gIAkJZ290byBvdXRfc2VydmVyX3Njb3BlOw0KPiAg
CX0NCj4gIA0KPiAgCXN0YXR1cyA9IHJwY19jYWxsX3N5bmMoY2xwLT5jbF9ycGNjbGllbnQsICZt
c2csIFJQQ19UQVNLX1RJTUVPVVQpOw0KPiAtCWlmICghc3RhdHVzKQ0KPiArCWlmIChzdGF0dXMg
PT0gTkZTNF9PSykNCg0KTm8uLi4NCg0KJ3N0YXR1cycgaXMgYW4gb3JkaW5hcnkgaW50ZWdlciB2
YWx1ZS4gSXQgaXMgbm90IGEgc3BlY2lhbA0KTkZTNF90aGlzdGhhdG9ydGhlb3RoZXIuLi4NCg0K
DQoNCi0tIA0KVHJvbmQgTXlrbGVidXN0DQpMaW51eCBORlMgY2xpZW50IG1haW50YWluZXINCg0K
TmV0QXBwDQpUcm9uZC5NeWtsZWJ1c3RAbmV0YXBwLmNvbQ0Kd3d3Lm5ldGFwcC5jb20NCg0K

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 09/20] NFS: Force server to drop NFSv4 state
  2012-04-23 20:54 ` [PATCH 09/20] NFS: Force server to drop NFSv4 state Chuck Lever
@ 2012-04-23 21:13   ` Myklebust, Trond
  2012-04-23 21:18     ` Chuck Lever
  0 siblings, 1 reply; 37+ messages in thread
From: Myklebust, Trond @ 2012-04-23 21:13 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

T24gTW9uLCAyMDEyLTA0LTIzIGF0IDE2OjU0IC0wNDAwLCBDaHVjayBMZXZlciB3cm90ZToNCj4g
QSBTRVRDTElFTlRJRCBib290IHZlcmlmaWVyIGlzIG5vdGhpbmcgbW9yZSB0aGFuIGEgYm9vdCB0
aW1lc3RhbXAuDQo+IEFuIE5GU3Y0IHNlcnZlciBpcyBvYmxpZ2F0ZWQgdG8gd2lwZSBhbGwgTkZT
djQgc3RhdGUgZm9yIGFuIE5GUyBjbGllbnQNCj4gd2hlbiB0aGUgY2xpZW50IHByZXNlbnRzIGFu
IHVwZGF0ZWQgU0VUQ0xJRU5USUQgYm9vdCB2ZXJpZmllci4gIFRoaXMNCj4gaXMgaG93IHNlcnZl
cnMgZGV0ZWN0IGNsaWVudCByZWJvb3RzLg0KPiANCj4gbmZzNF9yZXNldF9hbGxfc3RhdGUoKSBm
b3JjZXMgYSBib290IHZlcmlmaWVyIHJlZnJlc2ggdG8gY2F1c2UgYQ0KPiBzZXJ2ZXIgdG8gd2lw
ZSBzdGF0ZSBhcyBwYXJ0IG9mIHJlY292ZXJpbmcgZnJvbSBhIHNlcnZlciByZXBvcnRpbmcNCj4g
dGhhdCBpdCBoYXMgcmV2b2tlZCBzb21lIG9yIGFsbCBvZiBhIGNsaWVudCdzIE5GU3Y0IHN0YXRl
LiAgVGhpcyB3aXBlcw0KPiB0aGUgc2xhdGUgZm9yIGZ1bGwgc3RhdGUgcmVjb3ZlcnkuDQo+IA0K
PiBTb29uIHdlIHdhbnQgdG8gZ2V0IHJpZCBvZiB0aGUgcGVyLW5mc19jbGllbnQgY2xfYm9vdF90
aW1lIGZpZWxkLA0KPiBob3dldmVyLiAgV2l0aG91dCBjbF9ib290X3RpbWUsIHRoZSBORlMgY2xp
ZW50IHdpbGwgbmVlZCB0byBmaW5kIGENCj4gZGlmZmVyZW50IHdheSB0byBmb3JjZSB0aGUgc2Vy
dmVyIHRvIHB1cmdlIHRoZSBjbGllbnQncyBORlN2NCBzdGF0ZS4NCj4gDQo+IEJlY2F1c2UgdGhl
c2UgdmVyaWZpZXJzIGFyZSBvcGFxdWUgKGllLCB0aGUgc2VydmVyIGRvZXNuJ3Qga25vdyBvcg0K
PiBjYXJlIHRoYXQgdGhleSBhcmUgdGltZXN0YW1wcyksIHdlIGNhbiBkbyB0aGlzIGJ5IHVzaW5n
IHRoZSBzYW1lDQo+IHRyaWNrIHdlIHVzZSBub3csIGJ1dCB0aGVuIGFmdGVyd2FyZHMgZXN0YWJs
aXNoIGEgZnJlc2ggY2xpZW50IElEDQo+IHVzaW5nIHRoZSBvbGQgYm9vdCB2ZXJpZmllciBhZ2Fp
bi4NCj4gDQo+IEhvcGVmdWxseSB0aGVyZSBhcmUgbm8gZXh0cmEgcGFyYW5vaWQgc2VydmVyIGlt
cGxlbWVudGF0aW9ucyB0aGF0IGtlZXANCj4gdHJhY2sgb2YgdGhlIGNsaWVudCdzIGJvb3QgdmVy
aWZpZXJzIGFuZCBwcmV2ZW50IGNsaWVudHMgZnJvbSByZXVzaW5nDQo+IGEgcHJldmlvdXMgb25l
Lg0KPiANCj4gU2lnbmVkLW9mZi1ieTogQ2h1Y2sgTGV2ZXIgPGNodWNrLmxldmVyQG9yYWNsZS5j
b20+DQo+IC0tLQ0KPiANCj4gIGZzL25mcy9uZnM0X2ZzLmggICB8ICAgIDEgKw0KPiAgZnMvbmZz
L25mczRwcm9jLmMgIHwgICAgOSArKysrKysrLS0NCj4gIGZzL25mcy9uZnM0c3RhdGUuYyB8ICAg
IDcgKysrKysrLQ0KPiAgMyBmaWxlcyBjaGFuZ2VkLCAxNCBpbnNlcnRpb25zKCspLCAzIGRlbGV0
aW9ucygtKQ0KPiANCj4gZGlmZiAtLWdpdCBhL2ZzL25mcy9uZnM0X2ZzLmggYi9mcy9uZnMvbmZz
NF9mcy5oDQo+IGluZGV4IGQ4YzJkMzkuLjI5NTNmMmMgMTAwNjQ0DQo+IC0tLSBhL2ZzL25mcy9u
ZnM0X2ZzLmgNCj4gKysrIGIvZnMvbmZzL25mczRfZnMuaA0KPiBAQCAtMjQsNiArMjQsNyBAQCBl
bnVtIG5mczRfY2xpZW50X3N0YXRlIHsNCj4gIAlORlM0Q0xOVF9SRUNBTExfU0xPVCwNCj4gIAlO
RlM0Q0xOVF9MRUFTRV9DT05GSVJNLA0KPiAgCU5GUzRDTE5UX1NFUlZFUl9TQ09QRV9NSVNNQVRD
SCwNCj4gKwlORlM0Q0xOVF9QVVJHRV9TVEFURSwNCj4gIH07DQo+ICANCj4gIGVudW0gbmZzNF9z
ZXNzaW9uX3N0YXRlIHsNCj4gZGlmZiAtLWdpdCBhL2ZzL25mcy9uZnM0cHJvYy5jIGIvZnMvbmZz
L25mczRwcm9jLmMNCj4gaW5kZXggODRhMjZkOS4uYjE5Y2Y4MSAxMDA2NDQNCj4gLS0tIGEvZnMv
bmZzL25mczRwcm9jLmMNCj4gKysrIGIvZnMvbmZzL25mczRwcm9jLmMNCj4gQEAgLTM4NzgsOCAr
Mzg3OCwxMyBAQCBzdGF0aWMgdm9pZCBuZnM0X2NvbnN0cnVjdF9ib290X3ZlcmlmaWVyKHN0cnVj
dCBuZnNfY2xpZW50ICpjbHAsDQo+ICB7DQo+ICAJX19iZTMyIHZlcmZbMl07DQo+ICANCj4gLQl2
ZXJmWzBdID0gKF9fYmUzMiljbHAtPmNsX2Jvb3RfdGltZS50dl9zZWM7DQo+IC0JdmVyZlsxXSA9
IChfX2JlMzIpY2xwLT5jbF9ib290X3RpbWUudHZfbnNlYzsNCj4gKwlpZiAodGVzdF9iaXQoTkZT
NENMTlRfUFVSR0VfU1RBVEUsICZjbHAtPmNsX3N0YXRlKSkgew0KPiArCQl2ZXJmWzBdID0gKF9f
YmUzMilDVVJSRU5UX1RJTUUudHZfc2VjOw0KPiArCQl2ZXJmWzFdID0gKF9fYmUzMilDVVJSRU5U
X1RJTUUudHZfbnNlYzsNCj4gKwl9IGVsc2Ugew0KPiArCQl2ZXJmWzBdID0gKF9fYmUzMiljbHAt
PmNsX2Jvb3RfdGltZS50dl9zZWM7DQo+ICsJCXZlcmZbMV0gPSAoX19iZTMyKWNscC0+Y2xfYm9v
dF90aW1lLnR2X25zZWM7DQo+ICsJfQ0KPiAgCW1lbWNweShib290dmVyZi0+ZGF0YSwgdmVyZiwg
c2l6ZW9mKGJvb3R2ZXJmLT5kYXRhKSk7DQo+ICB9DQo+ICANCj4gZGlmZiAtLWdpdCBhL2ZzL25m
cy9uZnM0c3RhdGUuYyBiL2ZzL25mcy9uZnM0c3RhdGUuYw0KPiBpbmRleCBjYmVmMzY2Li43ZjU2
NTAyIDEwMDY0NA0KPiAtLS0gYS9mcy9uZnMvbmZzNHN0YXRlLmMNCj4gKysrIGIvZnMvbmZzL25m
czRzdGF0ZS5jDQo+IEBAIC0xNjEyLDcgKzE2MTIsNyBAQCB2b2lkIG5mczQxX2hhbmRsZV9yZWNh
bGxfc2xvdChzdHJ1Y3QgbmZzX2NsaWVudCAqY2xwKQ0KPiAgc3RhdGljIHZvaWQgbmZzNF9yZXNl
dF9hbGxfc3RhdGUoc3RydWN0IG5mc19jbGllbnQgKmNscCkNCj4gIHsNCj4gIAlpZiAodGVzdF9h
bmRfc2V0X2JpdChORlM0Q0xOVF9MRUFTRV9FWFBJUkVELCAmY2xwLT5jbF9zdGF0ZSkgPT0gMCkg
ew0KPiAtCQljbHAtPmNsX2Jvb3RfdGltZSA9IENVUlJFTlRfVElNRTsNCj4gKwkJc2V0X2JpdChO
RlM0Q0xOVF9QVVJHRV9TVEFURSwgJmNscC0+Y2xfc3RhdGUpOw0KPiAgCQluZnM0X3N0YXRlX3N0
YXJ0X3JlY2xhaW1fbm9ncmFjZShjbHApOw0KPiAgCQluZnM0X3NjaGVkdWxlX3N0YXRlX21hbmFn
ZXIoY2xwKTsNCj4gIAl9DQo+IEBAIC0xNzU5LDYgKzE3NTksMTEgQEAgc3RhdGljIHZvaWQgbmZz
NF9zdGF0ZV9tYW5hZ2VyKHN0cnVjdCBuZnNfY2xpZW50ICpjbHApDQo+ICANCj4gIAkvKiBFbnN1
cmUgZXhjbHVzaXZlIGFjY2VzcyB0byBORlN2NCBzdGF0ZSAqLw0KPiAgCWRvIHsNCj4gKwkJaWYg
KHRlc3RfYml0KE5GUzRDTE5UX1BVUkdFX1NUQVRFLCAmY2xwLT5jbF9zdGF0ZSkpIHsNCj4gKwkJ
CW5mczRfcmVjbGFpbV9sZWFzZShjbHApOw0KPiArCQkJY2xlYXJfYml0KE5GUzRDTE5UX1BVUkdF
X1NUQVRFLCAmY2xwLT5jbF9zdGF0ZSk7DQoNClRoaXMgbmVlZHMgdG8gc2V0IE5GUzRDTE5UX0xF
QVNFX0VYUElSRUQsIHNvIHRoYXQgd2UgZG8gYSBzZWNvbmQNClNFVENMSUVOVElEIGFmdGVyIHRo
ZSBzdGF0ZSBoYXMgYmVlbiBjbGVhcmVkLiBPdGhlcndpc2Ugd2UgZW5kIHVwIHdpdGggYQ0KbGVh
c2Ugd2l0aCB0aGUgd3JvbmcgdmVyaWZpZXIuDQoNCj4gKwkJfQ0KPiArDQo+ICAJCWlmICh0ZXN0
X2FuZF9jbGVhcl9iaXQoTkZTNENMTlRfTEVBU0VfRVhQSVJFRCwgJmNscC0+Y2xfc3RhdGUpKSB7
DQo+ICAJCQkvKiBXZSdyZSBnb2luZyB0byBoYXZlIHRvIHJlLWVzdGFibGlzaCBhIGNsaWVudGlk
ICovDQo+ICAJCQlzdGF0dXMgPSBuZnM0X3JlY2xhaW1fbGVhc2UoY2xwKTsNCj4gDQoNCi0tIA0K
VHJvbmQgTXlrbGVidXN0DQpMaW51eCBORlMgY2xpZW50IG1haW50YWluZXINCg0KTmV0QXBwDQpU
cm9uZC5NeWtsZWJ1c3RAbmV0YXBwLmNvbQ0Kd3d3Lm5ldGFwcC5jb20NCg0K

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 09/20] NFS: Force server to drop NFSv4 state
  2012-04-23 21:13   ` Myklebust, Trond
@ 2012-04-23 21:18     ` Chuck Lever
  0 siblings, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 21:18 UTC (permalink / raw)
  To: Myklebust, Trond; +Cc: linux-nfs


On Apr 23, 2012, at 5:13 PM, Myklebust, Trond wrote:

> On Mon, 2012-04-23 at 16:54 -0400, Chuck Lever wrote:
>> A SETCLIENTID boot verifier is nothing more than a boot timestamp.
>> An NFSv4 server is obligated to wipe all NFSv4 state for an NFS client
>> when the client presents an updated SETCLIENTID boot verifier.  This
>> is how servers detect client reboots.
>> 
>> nfs4_reset_all_state() forces a boot verifier refresh to cause a
>> server to wipe state as part of recovering from a server reporting
>> that it has revoked some or all of a client's NFSv4 state.  This wipes
>> the slate for full state recovery.
>> 
>> Soon we want to get rid of the per-nfs_client cl_boot_time field,
>> however.  Without cl_boot_time, the NFS client will need to find a
>> different way to force the server to purge the client's NFSv4 state.
>> 
>> Because these verifiers are opaque (ie, the server doesn't know or
>> care that they are timestamps), we can do this by using the same
>> trick we use now, but then afterwards establish a fresh client ID
>> using the old boot verifier again.
>> 
>> Hopefully there are no extra paranoid server implementations that keep
>> track of the client's boot verifiers and prevent clients from reusing
>> a previous one.
>> 
>> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
>> ---
>> 
>> fs/nfs/nfs4_fs.h   |    1 +
>> fs/nfs/nfs4proc.c  |    9 +++++++--
>> fs/nfs/nfs4state.c |    7 ++++++-
>> 3 files changed, 14 insertions(+), 3 deletions(-)
>> 
>> diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
>> index d8c2d39..2953f2c 100644
>> --- a/fs/nfs/nfs4_fs.h
>> +++ b/fs/nfs/nfs4_fs.h
>> @@ -24,6 +24,7 @@ enum nfs4_client_state {
>> 	NFS4CLNT_RECALL_SLOT,
>> 	NFS4CLNT_LEASE_CONFIRM,
>> 	NFS4CLNT_SERVER_SCOPE_MISMATCH,
>> +	NFS4CLNT_PURGE_STATE,
>> };
>> 
>> enum nfs4_session_state {
>> diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
>> index 84a26d9..b19cf81 100644
>> --- a/fs/nfs/nfs4proc.c
>> +++ b/fs/nfs/nfs4proc.c
>> @@ -3878,8 +3878,13 @@ static void nfs4_construct_boot_verifier(struct nfs_client *clp,
>> {
>> 	__be32 verf[2];
>> 
>> -	verf[0] = (__be32)clp->cl_boot_time.tv_sec;
>> -	verf[1] = (__be32)clp->cl_boot_time.tv_nsec;
>> +	if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
>> +		verf[0] = (__be32)CURRENT_TIME.tv_sec;
>> +		verf[1] = (__be32)CURRENT_TIME.tv_nsec;
>> +	} else {
>> +		verf[0] = (__be32)clp->cl_boot_time.tv_sec;
>> +		verf[1] = (__be32)clp->cl_boot_time.tv_nsec;
>> +	}
>> 	memcpy(bootverf->data, verf, sizeof(bootverf->data));
>> }
>> 
>> diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
>> index cbef366..7f56502 100644
>> --- a/fs/nfs/nfs4state.c
>> +++ b/fs/nfs/nfs4state.c
>> @@ -1612,7 +1612,7 @@ void nfs41_handle_recall_slot(struct nfs_client *clp)
>> static void nfs4_reset_all_state(struct nfs_client *clp)
>> {
>> 	if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
>> -		clp->cl_boot_time = CURRENT_TIME;
>> +		set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
>> 		nfs4_state_start_reclaim_nograce(clp);
>> 		nfs4_schedule_state_manager(clp);
>> 	}
>> @@ -1759,6 +1759,11 @@ static void nfs4_state_manager(struct nfs_client *clp)
>> 
>> 	/* Ensure exclusive access to NFSv4 state */
>> 	do {
>> +		if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
>> +			nfs4_reclaim_lease(clp);
>> +			clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
> 
> This needs to set NFS4CLNT_LEASE_EXPIRED, so that we do a second
> SETCLIENTID after the state has been cleared. Otherwise we end up with a
> lease with the wrong verifier.

Agreed, brain fart.  I had thought part of the reclaim lease path set it, but it doesn't.

Obviously these are not thoroughly tested yet.  I forgot to set the "-E" flag on "stg mail" when sending these so I didn't have a cover letter to mention that.

> 
>> +		}
>> +
>> 		if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
>> 			/* We're going to have to re-establish a clientid */
>> 			status = nfs4_reclaim_lease(clp);
>> 
> 
> -- 
> Trond Myklebust
> Linux NFS client maintainer
> 
> NetApp
> Trond.Myklebust@netapp.com
> www.netapp.com
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

-- 
Chuck Lever
chuck[dot]lever[at]oracle[dot]com





^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 20/20] NFS: Clean up debugging messages in fs/nfs/client.c
  2012-04-23 20:56 ` [PATCH 20/20] NFS: Clean up debugging messages in fs/nfs/client.c Chuck Lever
@ 2012-04-23 21:23   ` Malahal Naineni
  0 siblings, 0 replies; 37+ messages in thread
From: Malahal Naineni @ 2012-04-23 21:23 UTC (permalink / raw)
  To: Chuck Lever; +Cc: Trond.Myklebust, linux-nfs

Chuck Lever [chuck.lever@oracle.com] wrote:
> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> ---
> 
>  fs/nfs/client.c |   21 +++++++++++++++++----
>  1 files changed, 17 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/nfs/client.c b/fs/nfs/client.c
> index 7330673..beb001b 100644
> --- a/fs/nfs/client.c
> +++ b/fs/nfs/client.c
> @@ -192,6 +192,8 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
>  		clp->cl_machine_cred = cred;
>  	nfs_fscache_get_client_cookie(clp);
> 
> +	dprintk("NFS: %s returning new nfs_client = %p ({1})\n",
> +		__func__, clp);
>  	return clp;
> 
>  error_cleanup:
> @@ -291,7 +293,7 @@ static void pnfs_init_server(struct nfs_server *server)
>   */
>  static void nfs_free_client(struct nfs_client *clp)
>  {
> -	dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);
> +	dprintk("--> %s destroying nfs_client = %p\n", __func__, clp);
> 
>  	nfs4_shutdown_client(clp);
> 
> @@ -308,7 +310,7 @@ static void nfs_free_client(struct nfs_client *clp)
>  	kfree(clp->cl_hostname);
>  	kfree(clp);
> 
> -	dprintk("<-- nfs_free_client()\n");
> +	dprintk("<-- %s done\n", __func__);

Many dprintk's seem to take __func__. How about pushing that to dprink
macro itself. I have one and use it most of the time.

Regards, Malahal.


^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 18/20] NFS: Detect NFSv4 server trunking when mounting
  2012-04-23 20:55 ` [PATCH 18/20] NFS: Detect NFSv4 server trunking when mounting Chuck Lever
@ 2012-04-23 21:27   ` Myklebust, Trond
  2012-04-23 21:43     ` Chuck Lever
  2012-04-23 21:47     ` Chuck Lever
  0 siblings, 2 replies; 37+ messages in thread
From: Myklebust, Trond @ 2012-04-23 21:27 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

T24gTW9uLCAyMDEyLTA0LTIzIGF0IDE2OjU1IC0wNDAwLCBDaHVjayBMZXZlciB3cm90ZToNCj4g
Q3VycmVudGx5IHRoZSBMaW51eCBORlMgY2xpZW50IHdhaXRzIHRvIHBlcmZvcm0gYSBTRVRDTElF
TlRJRCB1bnRpbA0KPiBqdXN0IGJlZm9yZSBhbiBhcHBsaWNhdGlvbiB3YW50cyB0byBvcGVuIGEg
ZmlsZS4gIFF1aXRlIGEgYml0IG9mDQo+IGFjdGl2aXR5IGNhbiBvY2N1ciBiZWZvcmUgYW55IHN0
YXRlIGlzIG5lZWRlZC4NCj4gDQo+IElmIHRoZSBjbGllbnQgY2FyZXMgYWJvdXQgc2VydmVyIHRy
dW5raW5nLCBob3dldmVyLCBubyBORlN2NA0KPiBvcGVyYXRpb25zIGNhbiBwcm9jZWVkIHVudGls
IHRoZSBjbGllbnQgZGV0ZXJtaW5lcyB3aG8gaXQgaXMgdGFsa2luZw0KPiB0by4gIFRodXMgc2Vy
dmVyIElQIHRydW5raW5nIGRldGVjdGlvbiBtdXN0IGJlIGRvbmUgd2hlbiB0aGUgY2xpZW50DQo+
IGZpcnN0IGVuY291bnRlcnMgYW4gdW5mYW1pbGlhciBzZXJ2ZXIgSVAgYWRkcmVzcy4NCj4gDQo+
IFRoZSBuZnNfZ2V0X2NsaWVudCgpIGZ1bmN0aW9uIHdhbGtzIHRoZSBuZnNfY2xpZW50X2xpc3Qg
YW5kIG1hdGNoZXMgb24NCj4gc2VydmVyIElQIGFkZHJlc3MuICBUaGUgb3V0Y29tZSBvZiB0aGF0
IHdhbGsgdGVsbHMgdXMgaW1tZWRpYXRlbHkgaWYNCj4gd2UgaGF2ZSBhbiB1bmZhbWlsaWFyIHNl
cnZlciBJUCBhZGRyZXNzLiAgSXQgaW52b2tlcyBhbiBpbml0X2NsaWVudCgpDQo+IG1ldGhvZCBp
biB0aGlzIGNhc2UuDQo+IA0KPiBUaHVzLCBuZnM0X2luaXRfY2xpZW50KCkgY2FuIGVzdGFibGlz
aCBhIGZyZXNoIGNsaWVudCBJRCwgYW5kIHBlcmZvcm0NCj4gdHJ1bmtpbmcgZGV0ZWN0aW9uIHdp
dGggaXQuICBUaGUgZXhhY3QgcHJvY2VzcyBmb3IgZGV0ZWN0aW5nIHRydW5raW5nDQo+IGlzIGRp
ZmZlcmVudCBmb3IgTkZTdjQuMCBhbmQgTkZTdjQuMSwgc28gYSBtaW5vcnZlcnNpb24tc3BlY2lm
aWMNCj4gaW5pdF9jbGllbnQgY2FsbG91dCBpcyBpbnRyb2R1Y2VkLg0KPiANCj4gU2lnbmVkLW9m
Zi1ieTogQ2h1Y2sgTGV2ZXIgPGNodWNrLmxldmVyQG9yYWNsZS5jb20+DQo+IC0tLQ0KPiANCj4g
IGZzL25mcy9jbGllbnQuYyAgICB8ICAyMjMgKysrKysrKysrKysrKysrKysrKysrKysrKysrKysr
KysrKysrKysrKysrKysrKysrKysrKw0KPiAgZnMvbmZzL2ludGVybmFsLmggIHwgICAgNiArDQo+
ICBmcy9uZnMvbmZzNF9mcy5oICAgfCAgICA3ICsrDQo+ICBmcy9uZnMvbmZzNHByb2MuYyAgfCAg
ICAyIA0KPiAgZnMvbmZzL25mczRzdGF0ZS5jIHwgIDEzMSArKysrKysrKysrKysrKysrKysrKysr
KysrKysrKystDQo+ICA1IGZpbGVzIGNoYW5nZWQsIDM2NyBpbnNlcnRpb25zKCspLCAyIGRlbGV0
aW9ucygtKQ0KPiANCj4gZGlmZiAtLWdpdCBhL2ZzL25mcy9jbGllbnQuYyBiL2ZzL25mcy9jbGll
bnQuYw0KPiBpbmRleCA5MjBhYmJjLi43MzMwNjczIDEwMDY0NA0KPiAtLS0gYS9mcy9uZnMvY2xp
ZW50LmMNCj4gKysrIGIvZnMvbmZzL2NsaWVudC5jDQo+IEBAIC01NjYsNyArNTY2LDggQEAgbmZz
X2dldF9jbGllbnQoY29uc3Qgc3RydWN0IG5mc19jbGllbnRfaW5pdGRhdGEgKmNsX2luaXQsDQo+
ICAJCQlyZXR1cm4gbmZzX2ZvdW5kX2NsaWVudChjbF9pbml0LCBjbHApOw0KPiAgCQl9DQo+ICAJ
CWlmIChuZXcpIHsNCj4gLQkJCWxpc3RfYWRkKCZuZXctPmNsX3NoYXJlX2xpbmssICZubi0+bmZz
X2NsaWVudF9saXN0KTsNCj4gKwkJCWxpc3RfYWRkX3RhaWwoJm5ldy0+Y2xfc2hhcmVfbGluaywN
Cj4gKwkJCQkJJm5uLT5uZnNfY2xpZW50X2xpc3QpOw0KPiAgCQkJc3Bpbl91bmxvY2soJm5uLT5u
ZnNfY2xpZW50X2xvY2spOw0KPiAgCQkJbmV3LT5jbF9mbGFncyA9IGNsX2luaXQtPmluaXRfZmxh
Z3M7DQo+ICAJCQlyZXR1cm4gY2xfaW5pdC0+cnBjX29wcy0+aW5pdF9jbGllbnQobmV3LA0KPiBA
QCAtNTg0LDYgKzU4NSwyMTAgQEAgbmZzX2dldF9jbGllbnQoY29uc3Qgc3RydWN0IG5mc19jbGll
bnRfaW5pdGRhdGEgKmNsX2luaXQsDQo+ICAJcmV0dXJuIG5ldzsNCj4gIH0NCj4gIA0KPiArI2lm
ZGVmIENPTkZJR19ORlNfVjQNCj4gKy8qDQo+ICsgKiBSZXR1cm5zIHRydWUgaWYgdGhlIGNsaWVu
dCBJRHMgbWF0Y2gNCj4gKyAqLw0KPiArc3RhdGljIGJvb2wNCj4gK25mczRfbWF0Y2hfY2xpZW50
aWRzKHN0cnVjdCBuZnNfY2xpZW50ICphLCBzdHJ1Y3QgbmZzX2NsaWVudCAqYikNCj4gK3sNCj4g
KwlpZiAoYS0+Y2xfY2xpZW50aWQgIT0gYi0+Y2xfY2xpZW50aWQpIHsNCj4gKwkJZHByaW50aygi
TkZTOiAtLT4gJXMgY2xpZW50IElEICVsbHggZG9lcyBub3QgbWF0Y2ggJWxseFxuIiwNCj4gKwkJ
CV9fZnVuY19fLCBhLT5jbF9jbGllbnRpZCwgYi0+Y2xfY2xpZW50aWQpOw0KPiArCQlyZXR1cm4g
ZmFsc2U7DQo+ICsJfQ0KPiArCWRwcmludGsoIk5GUzogLS0+ICVzIGNsaWVudCBJRCAlbGx4IG1h
dGNoZXMgJWxseFxuIiwNCj4gKwkJX19mdW5jX18sIGEtPmNsX2NsaWVudGlkLCBiLT5jbF9jbGll
bnRpZCk7DQo+ICsJcmV0dXJuIHRydWU7DQo+ICt9DQo+ICsNCj4gKy8qKg0KPiArICogbmZzNDBf
d2Fsa19jbGllbnRfbGlzdCAtIEZpbmQgc2VydmVyIHRoYXQgcmVjb2duaXplcyBhIGNsaWVudCBJ
RA0KPiArICoNCj4gKyAqIEBuZXc6IG5mc19jbGllbnQgd2l0aCBjbGllbnQgSUQgdG8gdGVzdA0K
PiArICogQHJlc3VsdDogT1VUOiBmb3VuZCBuZnNfY2xpZW50LCBvciBuZXcNCj4gKyAqIEBjcmVk
OiBjcmVkZW50aWFsIHRvIHVzZSBmb3IgdHJ1bmtpbmcgdGVzdA0KPiArICoNCj4gKyAqIFJldHVy
bnMgTkZTNF9PSywgYSBuZWdhdGl2ZSBlcnJubywgb3IgYSBuZWdhdGl2ZSBORlM0RVJSIHN0YXR1
cy4NCj4gKyAqIElmIE5GUzRfT0sgaXMgcmV0dXJuZWQsIGFuIG5mc19jbGllbnQgcG9pbnRlciBp
cyBwbGFudGVkIGluICJyZXN1bHQuIg0KPiArICoNCj4gKyAqIE5COiBuZnM0MF93YWxrX2NsaWVu
dF9saXN0KCkgcmVsaWVzIG9uIHRoZSBuZXcgbmZzX2NsaWVudCBiZWluZw0KPiArICogICAgIHRo
ZSBsYXN0IG5mc19jbGllbnQgb24gdGhlIGxpc3QuDQo+ICsgKi8NCj4gK2ludCBuZnM0MF93YWxr
X2NsaWVudF9saXN0KHN0cnVjdCBuZnNfY2xpZW50ICpuZXcsDQo+ICsJCQkgICBzdHJ1Y3QgbmZz
X2NsaWVudCAqKnJlc3VsdCwNCj4gKwkJCSAgIHN0cnVjdCBycGNfY3JlZCAqY3JlZCkNCj4gK3sN
Cj4gKwlzdHJ1Y3QgbmZzX25ldCAqbm4gPSBuZXRfZ2VuZXJpYyhuZXctPmNsX25ldCwgbmZzX25l
dF9pZCk7DQo+ICsJc3RydWN0IG5mc19jbGllbnQgKnBvcywgKnByZXYgPSBOVUxMOw0KPiArCXN0
cnVjdCBuZnM0X3NldGNsaWVudGlkX3JlcyBjbGlkID0gew0KPiArCQkuY2xpZW50aWQJPSBuZXct
PmNsX2NsaWVudGlkLA0KPiArCQkuY29uZmlybQk9IG5ldy0+Y2xfY29uZmlybSwNCj4gKwl9Ow0K
PiArCWludCBzdGF0dXM7DQo+ICsNCj4gKwlkcHJpbnRrKCJORlM6IC0tPiAlcyBuZnNfY2xpZW50
ID0gJXBcbiIsIF9fZnVuY19fLCBuZXcpOw0KPiArDQo+ICsJc3Bpbl9sb2NrKCZubi0+bmZzX2Ns
aWVudF9sb2NrKTsNCj4gKw0KPiArCWxpc3RfZm9yX2VhY2hfZW50cnkocG9zLCAmbm4tPm5mc19j
bGllbnRfbGlzdCwgY2xfc2hhcmVfbGluaykgew0KPiArCQlpZiAocG9zLT5jbF9jb25zX3N0YXRl
IDwgMCkNCj4gKwkJCWNvbnRpbnVlOw0KPiArDQo+ICsJCWlmIChwb3MtPnJwY19vcHMgIT0gbmV3
LT5ycGNfb3BzKQ0KPiArCQkJY29udGludWU7DQo+ICsNCj4gKwkJaWYgKHBvcy0+Y2xfcHJvdG8g
IT0gbmV3LT5jbF9wcm90bykNCj4gKwkJCWNvbnRpbnVlOw0KPiArDQo+ICsJCWlmIChwb3MtPmNs
X21pbm9ydmVyc2lvbiAhPSBuZXctPmNsX21pbm9ydmVyc2lvbikNCj4gKwkJCWNvbnRpbnVlOw0K
PiArDQo+ICsJCWRwcmludGsoIk5GUzogLS0+ICVzIGNvbXBhcmluZyAlbGx4IGFuZCAlbGx4XG4i
LCBfX2Z1bmNfXywNCj4gKwkJCW5ldy0+Y2xfY2xpZW50aWQsIHBvcy0+Y2xfY2xpZW50aWQpOw0K
PiArCQlpZiAocG9zLT5jbF9jbGllbnRpZCAhPSBuZXctPmNsX2NsaWVudGlkKQ0KPiArCQkJY29u
dGludWU7DQo+ICsNCj4gKwkJYXRvbWljX2luYygmcG9zLT5jbF9jb3VudCk7DQo+ICsJCWRwcmlu
dGsoIiVzIG5mc19jbGllbnQgPSAlcCAoeyVkfSlcbiIsDQo+ICsJCQlfX2Z1bmNfXywgcG9zLCBh
dG9taWNfcmVhZCgmcG9zLT5jbF9jb3VudCkpOw0KPiArCQlzcGluX3VubG9jaygmbm4tPm5mc19j
bGllbnRfbG9jayk7DQo+ICsNCj4gKwkJZHByaW50aygiTkZTOiAtLT4gJXMgY29uZmlybWluZyAl
bGx4XG4iLA0KPiArCQkJX19mdW5jX18sIG5ldy0+Y2xfY2xpZW50aWQpOw0KPiArDQo+ICsJCWlm
IChwcmV2KQ0KPiArCQkJbmZzX3B1dF9jbGllbnQocHJldik7DQo+ICsNCj4gKwkJc3RhdHVzID0g
bmZzNF9wcm9jX3NldGNsaWVudGlkX2NvbmZpcm0ocG9zLCAmY2xpZCwgY3JlZCk7DQoNCkhvdyBh
cmUgeW91IHByb3RlY3RpbmcgYWdhaW5zdCBORlM0Q0xOVF9QVVJHRV9TVEFURT8NCg0KPiArCQlp
ZiAoc3RhdHVzID09IE5GUzRfT0spIHsNCg0Kc3RhdHVzID09IDANCg0KPiArCQkJLyogVGhlIG5l
dyBuZnNfY2xpZW50IGRvZXNuJ3QgbmVlZCB0aGUgZXh0cmENCj4gKwkJCSAqIGNsX2NvdW50IGJ1
bXAuICovDQo+ICsJCQluZnNfcHV0X2NsaWVudChwb3MpOw0KPiArCQkJKnJlc3VsdCA9IHBvczsN
Cj4gKwkJCWRwcmludGsoIk5GUzogPC0tICVzIHVzaW5nIG5mc19jbGllbnQgPSAlcCAoeyVkfSlc
biIsDQo+ICsJCQkJX19mdW5jX18sIHBvcywgYXRvbWljX3JlYWQoJnBvcy0+Y2xfY291bnQpKTsN
Cj4gKwkJCXJldHVybiBORlM0X09LOw0KDQpyZXR1cm4gMA0KDQo+ICsJCX0NCj4gKwkJaWYgKHN0
YXR1cyAhPSBORlM0RVJSX1NUQUxFX0NMSUVOVElEKSB7DQoNCkFsbCBlcnJvciB2YWx1ZXMgYXJl
IF9uZWdhdGl2ZV8gaW50ZWdlcnMuDQoNCj4gKwkJCW5mc19wdXRfY2xpZW50KHBvcyk7DQo+ICsJ
CQlkcHJpbnRrKCJORlM6IDwtLSAlcyBzdGF0dXMgPSAlZCwgbm8gcmVzdWx0XG4iLA0KPiArCQkJ
CV9fZnVuY19fLCBzdGF0dXMpOw0KPiArCQkJcmV0dXJuIHN0YXR1czsNCj4gKwkJfQ0KPiArDQo+
ICsJCXNwaW5fbG9jaygmbm4tPm5mc19jbGllbnRfbG9jayk7DQo+ICsJCXByZXYgPSBwb3M7DQo+
ICsJfQ0KPiArDQo+ICsJLyoNCj4gKwkgKiBObyBtYXRjaGluZyBuZnNfY2xpZW50IGZvdW5kLiAg
VGhpcyBzaG91bGQgYmUgaW1wb3NzaWJsZSwNCj4gKwkgKiBiZWNhdXNlIHRoZSBuZXcgbmZzX2Ns
aWVudCBoYXMgYWxyZWFkeSBiZWVuIGFkZGVkIHRvDQo+ICsJICogbmZzX2NsaWVudF9saXN0IGJ5
IG5mc19nZXRfY2xpZW50KCkuDQo+ICsJICoNCj4gKwkgKiBEb24ndCBCVUcoKSwgc2luY2UgdGhl
IGNhbGxlciBpcyBob2xkaW5nIGEgbXV0ZXguDQo+ICsJICovDQo+ICsJc3Bpbl91bmxvY2soJm5u
LT5uZnNfY2xpZW50X2xvY2spOw0KPiArCXByaW50ayhLRVJOX0VSUiAiTkZTOiAlcyBFcnJvcjog
bm8gbWF0Y2hpbmcgbmZzX2NsaWVudCBmb3VuZFxuIiwNCj4gKwkJX19mdW5jX18pOw0KPiArCXJl
dHVybiBORlM0RVJSX1NUQUxFX0NMSUVOVElEOw0KDQotTkZTNEVSUl9TVEFMRV9DTElFTlRJRC4u
Lg0KDQo+ICt9DQo+ICsNCj4gKyNpZmRlZiBDT05GSUdfTkZTX1Y0XzENCj4gKy8qDQo+ICsgKiBS
ZXR1cm5zIHRydWUgaWYgdGhlIHNlcnZlciBvd25lcnMgbWF0Y2gNCj4gKyAqLw0KPiArc3RhdGlj
IGJvb2wNCj4gK25mczRfbWF0Y2hfc2VydmVyb3duZXJzKHN0cnVjdCBuZnNfY2xpZW50ICphLCBz
dHJ1Y3QgbmZzX2NsaWVudCAqYikNCj4gK3sNCj4gKwlzdHJ1Y3QgbmZzNDFfc2VydmVyX293bmVy
ICpvMSA9IGEtPmNsX3NlcnZlcm93bmVyOw0KPiArCXN0cnVjdCBuZnM0MV9zZXJ2ZXJfb3duZXIg
Km8yID0gYi0+Y2xfc2VydmVyb3duZXI7DQo+ICsNCj4gKwlpZiAobzEtPm1pbm9yX2lkICE9IG8y
LT5taW5vcl9pZCkgew0KPiArCQlkcHJpbnRrKCJORlM6IC0tPiAlcyBzZXJ2ZXIgb3duZXIgbWlu
b3IgSURzIGRvIG5vdCBtYXRjaFxuIiwNCj4gKwkJCV9fZnVuY19fKTsNCj4gKwkJcmV0dXJuIGZh
bHNlOw0KPiArCX0NCj4gKw0KPiArCWlmIChvMS0+bWFqb3JfaWRfc3ogIT0gbzItPm1ham9yX2lk
X3N6KQ0KPiArCQlnb3RvIG91dF9tYWpvcl9taXNtYXRjaDsNCj4gKwlpZiAobWVtY21wKG8xLT5t
YWpvcl9pZCwgbzItPm1ham9yX2lkLCBvMS0+bWFqb3JfaWRfc3opICE9IDApDQo+ICsJCWdvdG8g
b3V0X21ham9yX21pc21hdGNoOw0KPiArDQo+ICsJZHByaW50aygiTkZTOiAtLT4gJXMgc2VydmVy
IG93bmVycyBtYXRjaFxuIiwgX19mdW5jX18pOw0KPiArCXJldHVybiB0cnVlOw0KPiArDQo+ICtv
dXRfbWFqb3JfbWlzbWF0Y2g6DQo+ICsJZHByaW50aygiTkZTOiAtLT4gJXMgc2VydmVyIG93bmVy
IG1ham9yIElEcyBkbyBub3QgbWF0Y2hcbiIsDQo+ICsJCV9fZnVuY19fKTsNCj4gKwlyZXR1cm4g
ZmFsc2U7DQo+ICt9DQo+ICsNCj4gKy8qKg0KPiArICogbmZzNDFfd2Fsa19jbGllbnRfbGlzdCAt
IEZpbmQgbmZzX2NsaWVudCB0aGF0IG1hdGNoZXMgYSBjbGllbnQvc2VydmVyIG93bmVyDQo+ICsg
Kg0KPiArICogQG5ldzogbmZzX2NsaWVudCB3aXRoIGNsaWVudCBJRCB0byB0ZXN0DQo+ICsgKiBA
cmVzdWx0OiBPVVQ6IGZvdW5kIG5mc19jbGllbnQsIG9yIG5ldw0KPiArICogQGNyZWQ6IGNyZWRl
bnRpYWwgdG8gdXNlIGZvciB0cnVua2luZyB0ZXN0DQo+ICsgKg0KPiArICogUmV0dXJucyBORlM0
X09LLCBhIG5lZ2F0aXZlIGVycm5vLCBvciBhIG5lZ2F0aXZlIE5GUzRFUlIgc3RhdHVzLg0KPiAr
ICogSWYgTkZTNF9PSyBpcyByZXR1cm5lZCwgYW4gbmZzX2NsaWVudCBwb2ludGVyIGlzIHBsYW50
ZWQgaW4gInJlc3VsdC4iDQo+ICsgKg0KPiArICogTkI6IG5mczQxX3dhbGtfY2xpZW50X2xpc3Qo
KSByZWxpZXMgb24gdGhlIG5ldyBuZnNfY2xpZW50IGJlaW5nDQo+ICsgKiAgICAgdGhlIGxhc3Qg
bmZzX2NsaWVudCBvbiB0aGUgbGlzdC4NCj4gKyAqLw0KPiAraW50IG5mczQxX3dhbGtfY2xpZW50
X2xpc3Qoc3RydWN0IG5mc19jbGllbnQgKm5ldywNCj4gKwkJCSAgIHN0cnVjdCBuZnNfY2xpZW50
ICoqcmVzdWx0LA0KPiArCQkJICAgc3RydWN0IHJwY19jcmVkICpjcmVkKQ0KPiArew0KPiArCXN0
cnVjdCBuZnNfbmV0ICpubiA9IG5ldF9nZW5lcmljKG5ldy0+Y2xfbmV0LCBuZnNfbmV0X2lkKTsN
Cj4gKwlzdHJ1Y3QgbmZzX2NsaWVudCAqcG9zOw0KPiArDQo+ICsJZHByaW50aygiTkZTOiAtLT4g
JXMgbmZzX2NsaWVudCA9ICVwXG4iLCBfX2Z1bmNfXywgbmV3KTsNCj4gKw0KPiArCXNwaW5fbG9j
aygmbm4tPm5mc19jbGllbnRfbG9jayk7DQo+ICsNCj4gKwlsaXN0X2Zvcl9lYWNoX2VudHJ5KHBv
cywgJm5uLT5uZnNfY2xpZW50X2xpc3QsIGNsX3NoYXJlX2xpbmspIHsNCj4gKwkJaWYgKHBvcy0+
Y2xfY29uc19zdGF0ZSA8IDApDQo+ICsJCQljb250aW51ZTsNCj4gKw0KPiArCQlpZiAocG9zLT5y
cGNfb3BzICE9IG5ldy0+cnBjX29wcykNCj4gKwkJCWNvbnRpbnVlOw0KPiArDQo+ICsJCWlmIChw
b3MtPmNsX3Byb3RvICE9IG5ldy0+Y2xfcHJvdG8pDQo+ICsJCQljb250aW51ZTsNCj4gKw0KPiAr
CQlpZiAocG9zLT5jbF9taW5vcnZlcnNpb24gIT0gbmV3LT5jbF9taW5vcnZlcnNpb24pDQo+ICsJ
CQljb250aW51ZTsNCj4gKw0KPiArCQlpZiAoIW5mczRfbWF0Y2hfY2xpZW50aWRzKHBvcywgbmV3
KSkNCj4gKwkJCWNvbnRpbnVlOw0KPiArDQo+ICsJCWlmICghbmZzNF9tYXRjaF9zZXJ2ZXJvd25l
cnMocG9zLCBuZXcpKQ0KPiArCQkJY29udGludWU7DQo+ICsNCj4gKwkJYXRvbWljX2luYygmcG9z
LT5jbF9jb3VudCk7DQo+ICsJCSpyZXN1bHQgPSBwb3M7DQo+ICsJCWRwcmludGsoIk5GUzogPC0t
ICVzIHVzaW5nIG5mc19jbGllbnQgPSAlcCAoeyVkfSlcbiIsDQo+ICsJCQlfX2Z1bmNfXywgcG9z
LCBhdG9taWNfcmVhZCgmcG9zLT5jbF9jb3VudCkpOw0KPiArCQlyZXR1cm4gTkZTNF9PSzsNCg0K
MA0KDQo+ICsJfQ0KPiArDQo+ICsJLyoNCj4gKwkgKiBObyBtYXRjaGluZyBuZnNfY2xpZW50IGZv
dW5kLiAgVGhpcyBzaG91bGQgYmUgaW1wb3NzaWJsZSwNCj4gKwkgKiBiZWNhdXNlIHRoZSBuZXcg
bmZzX2NsaWVudCBoYXMgYWxyZWFkeSBiZWVuIGFkZGVkIHRvDQo+ICsJICogbmZzX2NsaWVudF9s
aXN0IGJ5IG5mc19nZXRfY2xpZW50KCkuDQo+ICsJICoNCj4gKwkgKiBEb24ndCBCVUcoKSwgc2lu
Y2UgdGhlIGNhbGxlciBpcyBob2xkaW5nIGEgbXV0ZXguDQo+ICsJICovDQo+ICsJc3Bpbl91bmxv
Y2soJm5uLT5uZnNfY2xpZW50X2xvY2spOw0KPiArCXByaW50ayhLRVJOX0VSUiAiTkZTOiAlcyBF
cnJvcjogbm8gbWF0Y2hpbmcgbmZzX2NsaWVudCBmb3VuZFxuIiwNCj4gKwkJX19mdW5jX18pOw0K
PiArCXJldHVybiBORlM0RVJSX1NUQUxFX0NMSUVOVElEOw0KDQotTkZTNEVSUl9TVEFMRV9DTElF
TlRJRA0KDQo+ICt9DQo+ICsjZW5kaWYJLyogQ09ORklHX05GU19WNF8xICovDQo+ICsjZW5kaWYJ
LyogQ09ORklHX05GU19WNCAqLw0KPiArDQo+ICAvKg0KPiAgICogTWFyayBhIHNlcnZlciBhcyBy
ZWFkeSBvciBmYWlsZWQNCj4gICAqLw0KPiBAQCAtMTM1MCw2ICsxNTU1LDcgQEAgc3RydWN0IG5m
c19jbGllbnQgKm5mczRfaW5pdF9jbGllbnQoc3RydWN0IG5mc19jbGllbnQgKmNscCwNCj4gIAkJ
CQkgICAgcnBjX2F1dGhmbGF2b3JfdCBhdXRoZmxhdm91cikNCj4gIHsNCj4gIAljaGFyIGJ1ZltJ
TkVUNl9BRERSU1RSTEVOICsgMV07DQo+ICsJc3RydWN0IG5mc19jbGllbnQgKm9sZDsNCj4gIAlp
bnQgZXJyb3I7DQo+ICANCj4gIAlpZiAoY2xwLT5jbF9jb25zX3N0YXRlID09IE5GU19DU19SRUFE
WSkgew0KPiBAQCAtMTM5NSw2ICsxNjAxLDIxIEBAIHN0cnVjdCBuZnNfY2xpZW50ICpuZnM0X2lu
aXRfY2xpZW50KHN0cnVjdCBuZnNfY2xpZW50ICpjbHAsDQo+ICANCj4gIAlpZiAoIW5mczRfaGFz
X3Nlc3Npb24oY2xwKSkNCj4gIAkJbmZzX21hcmtfY2xpZW50X3JlYWR5KGNscCwgTkZTX0NTX1JF
QURZKTsNCj4gKw0KPiArCWVycm9yID0gbmZzNF9kZXRlY3RfdHJ1bmtpbmcoY2xwLCAmb2xkKTsN
Cj4gKwlpZiAoZXJyb3IgPCAwKQ0KPiArCQlnb3RvIGVycm9yOw0KPiArCWlmIChjbHAgIT0gb2xk
KSB7DQo+ICsJCW5mc19tYXJrX2NsaWVudF9yZWFkeShjbHAsIE5GU19DU19SRUFEWSk7DQo+ICsJ
CW5mc19wdXRfY2xpZW50KGNscCk7DQo+ICsJCWRwcmludGsoIjwtLSAlcygpIHJldHVybmluZyAl
cCBpbnN0ZWFkIG9mICVwXG4iLA0KPiArCQkJX19mdW5jX18sIG9sZCwgY2xwKTsNCj4gKwkJY2xw
ID0gb2xkOw0KPiArCQlhdG9taWNfaW5jKCZjbHAtPmNsX2NvdW50KTsNCj4gKwkJZHByaW50aygi
TkZTOiA8LS0gJXMgdXNpbmcgbmZzX2NsaWVudCA9ICVwICh7JWR9KVxuIiwNCj4gKwkJCV9fZnVu
Y19fLCBjbHAsIGF0b21pY19yZWFkKCZjbHAtPmNsX2NvdW50KSk7DQo+ICsJfQ0KPiArDQo+ICAJ
cmV0dXJuIGNscDsNCj4gIA0KPiAgZXJyb3I6DQo+IGRpZmYgLS1naXQgYS9mcy9uZnMvaW50ZXJu
YWwuaCBiL2ZzL25mcy9pbnRlcm5hbC5oDQo+IGluZGV4IDMxNWRjODYuLjg1ODg4ZjYgMTAwNjQ0
DQo+IC0tLSBhL2ZzL25mcy9pbnRlcm5hbC5oDQo+ICsrKyBiL2ZzL25mcy9pbnRlcm5hbC5oDQo+
IEBAIC0xNjIsNiArMTYyLDEyIEBAIGV4dGVybiBzdHJ1Y3QgbmZzX2NsaWVudCAqbmZzNF9pbml0
X2NsaWVudChzdHJ1Y3QgbmZzX2NsaWVudCAqY2xwLA0KPiAgCQkJCWNvbnN0IHN0cnVjdCBycGNf
dGltZW91dCAqdGltZXBhcm1zLA0KPiAgCQkJCWNvbnN0IGNoYXIgKmlwX2FkZHIsDQo+ICAJCQkJ
cnBjX2F1dGhmbGF2b3JfdCBhdXRoZmxhdm91cik7DQo+ICtleHRlcm4gaW50IG5mczQwX3dhbGtf
Y2xpZW50X2xpc3Qoc3RydWN0IG5mc19jbGllbnQgKmNscCwNCj4gKwkJCQlzdHJ1Y3QgbmZzX2Ns
aWVudCAqKnJlc3VsdCwNCj4gKwkJCQlzdHJ1Y3QgcnBjX2NyZWQgKmNyZWQpOw0KPiArZXh0ZXJu
IGludCBuZnM0MV93YWxrX2NsaWVudF9saXN0KHN0cnVjdCBuZnNfY2xpZW50ICpjbHAsDQo+ICsJ
CQkJc3RydWN0IG5mc19jbGllbnQgKipyZXN1bHQsDQo+ICsJCQkJc3RydWN0IHJwY19jcmVkICpj
cmVkKTsNCj4gIGV4dGVybiBzdHJ1Y3QgbmZzX3NlcnZlciAqbmZzX2NyZWF0ZV9zZXJ2ZXIoDQo+
ICAJCQkJCWNvbnN0IHN0cnVjdCBuZnNfcGFyc2VkX21vdW50X2RhdGEgKiwNCj4gIAkJCQkJc3Ry
dWN0IG5mc19maCAqKTsNCj4gZGlmZiAtLWdpdCBhL2ZzL25mcy9uZnM0X2ZzLmggYi9mcy9uZnMv
bmZzNF9mcy5oDQo+IGluZGV4IDI5NTNmMmMuLmJhMTM5ODYgMTAwNjQ0DQo+IC0tLSBhL2ZzL25m
cy9uZnM0X2ZzLmgNCj4gKysrIGIvZnMvbmZzL25mczRfZnMuaA0KPiBAQCAtMTkwLDYgKzE5MCw4
IEBAIHN0cnVjdCBuZnM0X3N0YXRlX3JlY292ZXJ5X29wcyB7DQo+ICAJaW50ICgqZXN0YWJsaXNo
X2NsaWQpKHN0cnVjdCBuZnNfY2xpZW50ICosIHN0cnVjdCBycGNfY3JlZCAqKTsNCj4gIAlzdHJ1
Y3QgcnBjX2NyZWQgKiAoKmdldF9jbGlkX2NyZWQpKHN0cnVjdCBuZnNfY2xpZW50ICopOw0KPiAg
CWludCAoKnJlY2xhaW1fY29tcGxldGUpKHN0cnVjdCBuZnNfY2xpZW50ICopOw0KPiArCWludCAo
KmRldGVjdF90cnVua2luZykoc3RydWN0IG5mc19jbGllbnQgKiwgc3RydWN0IG5mc19jbGllbnQg
KiosDQo+ICsJCXN0cnVjdCBycGNfY3JlZCAqKTsNCj4gIH07DQo+ICANCj4gIHN0cnVjdCBuZnM0
X3N0YXRlX21haW50ZW5hbmNlX29wcyB7DQo+IEBAIC0yOTcsOSArMjk5LDE0IEBAIGV4dGVybiB2
b2lkIG5mczRfcmVuZXdfc3RhdGUoc3RydWN0IHdvcmtfc3RydWN0ICopOw0KPiAgLyogbmZzNHN0
YXRlLmMgKi8NCj4gIHN0cnVjdCBycGNfY3JlZCAqbmZzNF9nZXRfc2V0Y2xpZW50aWRfY3JlZChz
dHJ1Y3QgbmZzX2NsaWVudCAqY2xwKTsNCj4gIHN0cnVjdCBycGNfY3JlZCAqbmZzNF9nZXRfcmVu
ZXdfY3JlZF9sb2NrZWQoc3RydWN0IG5mc19jbGllbnQgKmNscCk7DQo+ICtpbnQgbmZzNF9kZXRl
Y3RfdHJ1bmtpbmcoc3RydWN0IG5mc19jbGllbnQgKmNscCwgc3RydWN0IG5mc19jbGllbnQgKiop
Ow0KPiAraW50IG5mczQwX2RldGVjdF90cnVua2luZyhzdHJ1Y3QgbmZzX2NsaWVudCAqY2xwLCBz
dHJ1Y3QgbmZzX2NsaWVudCAqKiwNCj4gKwkJCXN0cnVjdCBycGNfY3JlZCAqKTsNCj4gICNpZiBk
ZWZpbmVkKENPTkZJR19ORlNfVjRfMSkNCj4gIHN0cnVjdCBycGNfY3JlZCAqbmZzNF9nZXRfbWFj
aGluZV9jcmVkX2xvY2tlZChzdHJ1Y3QgbmZzX2NsaWVudCAqY2xwKTsNCj4gIHN0cnVjdCBycGNf
Y3JlZCAqbmZzNF9nZXRfZXhjaGFuZ2VfaWRfY3JlZChzdHJ1Y3QgbmZzX2NsaWVudCAqY2xwKTsN
Cj4gK2ludCBuZnM0MV9kZXRlY3RfdHJ1bmtpbmcoc3RydWN0IG5mc19jbGllbnQgKmNscCwgc3Ry
dWN0IG5mc19jbGllbnQgKiosDQo+ICsJCQlzdHJ1Y3QgcnBjX2NyZWQgKik7DQo+ICBleHRlcm4g
dm9pZCBuZnM0X3NjaGVkdWxlX3Nlc3Npb25fcmVjb3Zlcnkoc3RydWN0IG5mczRfc2Vzc2lvbiAq
KTsNCj4gICNlbHNlDQo+ICBzdGF0aWMgaW5saW5lIHZvaWQgbmZzNF9zY2hlZHVsZV9zZXNzaW9u
X3JlY292ZXJ5KHN0cnVjdCBuZnM0X3Nlc3Npb24gKnNlc3Npb24pDQo+IGRpZmYgLS1naXQgYS9m
cy9uZnMvbmZzNHByb2MuYyBiL2ZzL25mcy9uZnM0cHJvYy5jDQo+IGluZGV4IDNmZDk5NDQuLjAw
YjVkMDIgMTAwNjQ0DQo+IC0tLSBhL2ZzL25mcy9uZnM0cHJvYy5jDQo+ICsrKyBiL2ZzL25mcy9u
ZnM0cHJvYy5jDQo+IEBAIC02NDUzLDYgKzY0NTMsNyBAQCBzdGF0aWMgY29uc3Qgc3RydWN0IG5m
czRfc3RhdGVfcmVjb3Zlcnlfb3BzIG5mczQwX3JlYm9vdF9yZWNvdmVyeV9vcHMgPSB7DQo+ICAJ
LnJlY292ZXJfbG9jawk9IG5mczRfbG9ja19yZWNsYWltLA0KPiAgCS5lc3RhYmxpc2hfY2xpZCA9
IG5mczRfaW5pdF9jbGllbnRpZCwNCj4gIAkuZ2V0X2NsaWRfY3JlZAk9IG5mczRfZ2V0X3NldGNs
aWVudGlkX2NyZWQsDQo+ICsJLmRldGVjdF90cnVua2luZyA9IG5mczQwX2RldGVjdF90cnVua2lu
ZywNCj4gIH07DQo+ICANCj4gICNpZiBkZWZpbmVkKENPTkZJR19ORlNfVjRfMSkNCj4gQEAgLTY0
NjQsNiArNjQ2NSw3IEBAIHN0YXRpYyBjb25zdCBzdHJ1Y3QgbmZzNF9zdGF0ZV9yZWNvdmVyeV9v
cHMgbmZzNDFfcmVib290X3JlY292ZXJ5X29wcyA9IHsNCj4gIAkuZXN0YWJsaXNoX2NsaWQgPSBu
ZnM0MV9pbml0X2NsaWVudGlkLA0KPiAgCS5nZXRfY2xpZF9jcmVkCT0gbmZzNF9nZXRfZXhjaGFu
Z2VfaWRfY3JlZCwNCj4gIAkucmVjbGFpbV9jb21wbGV0ZSA9IG5mczQxX3Byb2NfcmVjbGFpbV9j
b21wbGV0ZSwNCj4gKwkuZGV0ZWN0X3RydW5raW5nID0gbmZzNDFfZGV0ZWN0X3RydW5raW5nLA0K
PiAgfTsNCj4gICNlbmRpZiAvKiBDT05GSUdfTkZTX1Y0XzEgKi8NCj4gIA0KPiBkaWZmIC0tZ2l0
IGEvZnMvbmZzL25mczRzdGF0ZS5jIGIvZnMvbmZzL25mczRzdGF0ZS5jDQo+IGluZGV4IDZhMWEz
MDUuLmRmNTk5NTEgMTAwNjQ0DQo+IC0tLSBhL2ZzL25mcy9uZnM0c3RhdGUuYw0KPiArKysgYi9m
cy9uZnMvbmZzNHN0YXRlLmMNCj4gQEAgLTU3LDEwICs1NywxMiBAQA0KPiAgI2luY2x1ZGUgImlu
dGVybmFsLmgiDQo+ICAjaW5jbHVkZSAicG5mcy5oIg0KPiAgDQo+ICsjZGVmaW5lIE5GU0RCR19G
QUNJTElUWQkJTkZTREJHX0NMSUVOVA0KDQpNb3N0IG9mIHRoZSBzdHVmZiBpbiBuZnM0c3RhdGUu
YyBpcyBkZWFsaW5nIHdpdGggTkZTdjQtc3BlY2lmaWMgc3RhdGUuDQpJdCBkb2VzIG5vdCBtYWtl
IHNlbnNlIHRvIGx1bXAgdGhhdCBpbiB3aXRoIHRoZSBuZnNfY2xpZW50IGRlYnVnZ2luZy4NCg0K
PiArDQo+ICAjZGVmaW5lIE9QRU5PV05FUl9QT09MX1NJWkUJOA0KPiAgDQo+ICBjb25zdCBuZnM0
X3N0YXRlaWQgemVyb19zdGF0ZWlkOw0KPiAtDQo+ICtzdGF0aWMgREVGSU5FX01VVEVYKG5mc19j
bGlkX2luaXRfbXV0ZXgpOw0KPiAgc3RhdGljIExJU1RfSEVBRChuZnM0X2NsaWVudGlkX2xpc3Qp
Ow0KPiAgDQo+ICBpbnQgbmZzNF9pbml0X2NsaWVudGlkKHN0cnVjdCBuZnNfY2xpZW50ICpjbHAs
IHN0cnVjdCBycGNfY3JlZCAqY3JlZCkNCj4gQEAgLTk0LDYgKzk2LDQ3IEBAIG91dDoNCj4gIAly
ZXR1cm4gc3RhdHVzOw0KPiAgfQ0KPiAgDQo+ICsvKioNCj4gKyAqIG5mczQwX2RldGVjdF90cnVu
a2luZyAtIERldGVjdCBzZXJ2ZXIgSVAgYWRkcmVzcyB0cnVua2luZyAobXYwKQ0KPiArICoNCj4g
KyAqIEBjbHA6IG5mc19jbGllbnQgdW5kZXIgdGVzdA0KPiArICogQHJlc3VsdDogT1VUOiBmb3Vu
ZCBuZnNfY2xpZW50LCBvciBjbHANCj4gKyAqIEBjcmVkOiBjcmVkZW50aWFsIHRvIHVzZSBmb3Ig
dHJ1bmtpbmcgdGVzdA0KPiArICoNCj4gKyAqIFJldHVybnMgTkZTNF9PSywgYSBuZWdhdGl2ZSBl
cnJubywgb3IgYSBuZWdhdGl2ZSBORlM0RVJSIHN0YXR1cy4NCj4gKyAqIElmIE5GUzRfT0sgaXMg
cmV0dXJuZWQsIGFuIG5mc19jbGllbnQgcG9pbnRlciBpcyBwbGFudGVkIGluDQo+ICsgKiAicmVz
dWx0Ii4NCj4gKyAqLw0KPiAraW50IG5mczQwX2RldGVjdF90cnVua2luZyhzdHJ1Y3QgbmZzX2Ns
aWVudCAqY2xwLCBzdHJ1Y3QgbmZzX2NsaWVudCAqKnJlc3VsdCwNCj4gKwkJCSAgc3RydWN0IHJw
Y19jcmVkICpjcmVkKQ0KPiArew0KPiArCXN0cnVjdCBuZnM0X3NldGNsaWVudGlkX3JlcyBjbGlk
ID0gew0KPiArCQkuY2xpZW50aWQgPSBjbHAtPmNsX2NsaWVudGlkLA0KPiArCQkuY29uZmlybSA9
IGNscC0+Y2xfY29uZmlybSwNCj4gKwl9Ow0KPiArCXVuc2lnbmVkIHNob3J0IHBvcnQ7DQo+ICsJ
aW50IHN0YXR1czsNCj4gKw0KPiArCXBvcnQgPSBuZnNfY2FsbGJhY2tfdGNwcG9ydDsNCj4gKwlp
ZiAoY2xwLT5jbF9hZGRyLnNzX2ZhbWlseSA9PSBBRl9JTkVUNikNCj4gKwkJcG9ydCA9IG5mc19j
YWxsYmFja190Y3Bwb3J0NjsNCj4gKw0KPiArCXN0YXR1cyA9IG5mczRfcHJvY19zZXRjbGllbnRp
ZChjbHAsIE5GUzRfQ0FMTEJBQ0ssIHBvcnQsIGNyZWQsICZjbGlkKTsNCj4gKwlpZiAoc3RhdHVz
ICE9IE5GUzRfT0spDQo+ICsJCWdvdG8gb3V0Ow0KPiArCWNscC0+Y2xfY2xpZW50aWQgPSBjbGlk
LmNsaWVudGlkOw0KPiArCWNscC0+Y2xfY29uZmlybSA9IGNsaWQuY29uZmlybTsNCj4gKw0KPiAr
CXN0YXR1cyA9IG5mczQwX3dhbGtfY2xpZW50X2xpc3QoY2xwLCByZXN1bHQsIGNyZWQpOw0KPiAr
CWlmIChzdGF0dXMgIT0gTkZTNF9PSykgew0KPiArCQlzZXRfYml0KE5GUzRDTE5UX0xFQVNFX0NP
TkZJUk0sICZjbHAtPmNsX3N0YXRlKTsNCj4gKwkJbmZzNF9zY2hlZHVsZV9zdGF0ZV9yZW5ld2Fs
KCpyZXN1bHQpOw0KPiArCX0NCj4gKw0KPiArb3V0Og0KPiArCXJldHVybiBzdGF0dXM7DQo+ICt9
DQo+ICsNCj4gIHN0cnVjdCBycGNfY3JlZCAqbmZzNF9nZXRfbWFjaGluZV9jcmVkX2xvY2tlZChz
dHJ1Y3QgbmZzX2NsaWVudCAqY2xwKQ0KPiAgew0KPiAgCXN0cnVjdCBycGNfY3JlZCAqY3JlZCA9
IE5VTEw7DQo+IEBAIC0yNjQsNiArMzA3LDQ0IEBAIG91dDoNCj4gIAlyZXR1cm4gc3RhdHVzOw0K
PiAgfQ0KPiAgDQo+ICsvKioNCj4gKyAqIG5mczQxX2RldGVjdF90cnVua2luZyAtIERldGVjdCBz
ZXJ2ZXIgSVAgYWRkcmVzcyB0cnVua2luZyAobXYxKQ0KPiArICoNCj4gKyAqIEBjbHA6IG5mc19j
bGllbnQgdW5kZXIgdGVzdA0KPiArICogQHJlc3VsdDogT1VUOiBmb3VuZCBuZnNfY2xpZW50LCBv
ciBjbHANCj4gKyAqIEBjcmVkOiBjcmVkZW50aWFsIHRvIHVzZSBmb3IgdHJ1bmtpbmcgdGVzdA0K
PiArICoNCj4gKyAqIFJldHVybnMgTkZTNF9PSywgYSBuZWdhdGl2ZSBlcnJubywgb3IgYSBuZWdh
dGl2ZSBORlM0RVJSIHN0YXR1cy4NCj4gKyAqIElmIE5GUzRfT0sgaXMgcmV0dXJuZWQsIGFuIG5m
c19jbGllbnQgcG9pbnRlciBpcyBwbGFudGVkIGluDQo+ICsgKiAicmVzdWx0Ii4NCj4gKyAqLw0K
PiAraW50IG5mczQxX2RldGVjdF90cnVua2luZyhzdHJ1Y3QgbmZzX2NsaWVudCAqY2xwLCBzdHJ1
Y3QgbmZzX2NsaWVudCAqKnJlc3VsdCwNCj4gKwkJCSAgc3RydWN0IHJwY19jcmVkICpjcmVkKQ0K
PiArew0KPiArCXN0cnVjdCBuZnNfY2xpZW50ICp0cnVua2VkOw0KPiArCWludCBzdGF0dXM7DQo+
ICsNCj4gKwluZnM0X2JlZ2luX2RyYWluX3Nlc3Npb24oY2xwKTsNCj4gKwlzdGF0dXMgPSBuZnM0
X3Byb2NfZXhjaGFuZ2VfaWQoY2xwLCBjcmVkKTsNCj4gKwlpZiAoc3RhdHVzICE9IE5GUzRfT0sp
DQo+ICsJCWdvdG8gb3V0Ow0KPiArDQo+ICsJc3RhdHVzID0gbmZzNDFfd2Fsa19jbGllbnRfbGlz
dChjbHAsICZ0cnVua2VkLCBjcmVkKTsNCj4gKwlpZiAoc3RhdHVzICE9IE5GUzRfT0spDQo+ICsJ
CWdvdG8gb3V0Ow0KPiArDQo+ICsJc2V0X2JpdChORlM0Q0xOVF9MRUFTRV9DT05GSVJNLCAmdHJ1
bmtlZC0+Y2xfc3RhdGUpOw0KPiArCXN0YXR1cyA9IG5mczRfcHJvY19jcmVhdGVfc2Vzc2lvbih0
cnVua2VkKTsNCj4gKwlpZiAoc3RhdHVzICE9IE5GUzRfT0spDQo+ICsJCWdvdG8gb3V0Ow0KPiAr
CWNsZWFyX2JpdChORlM0Q0xOVF9MRUFTRV9DT05GSVJNLCAmdHJ1bmtlZC0+Y2xfc3RhdGUpOw0K
PiArCW5mczQxX3NldHVwX3N0YXRlX3JlbmV3YWwodHJ1bmtlZCk7DQo+ICsJbmZzX21hcmtfY2xp
ZW50X3JlYWR5KHRydW5rZWQsIE5GU19DU19SRUFEWSk7DQo+ICsJKnJlc3VsdCA9IHRydW5rZWQ7
DQo+ICtvdXQ6DQo+ICsJcmV0dXJuIHN0YXR1czsNCj4gK30NCj4gKw0KPiAgc3RydWN0IHJwY19j
cmVkICpuZnM0X2dldF9leGNoYW5nZV9pZF9jcmVkKHN0cnVjdCBuZnNfY2xpZW50ICpjbHApDQo+
ICB7DQo+ICAJc3RydWN0IHJwY19jcmVkICpjcmVkOw0KPiBAQCAtMTU3OSw2ICsxNjYwLDggQEAg
c3RhdGljIGludCBuZnM0X3JlY2xhaW1fbGVhc2Uoc3RydWN0IG5mc19jbGllbnQgKmNscCkNCj4g
IAlycGNfYXV0aGZsYXZvcl90IGZsYXZvcnNbTkZTX01BWF9TRUNGTEFWT1JTXTsNCj4gIAlpbnQg
aSwgbGVuLCBzdGF0dXM7DQo+ICANCj4gKwltdXRleF9sb2NrKCZuZnNfY2xpZF9pbml0X211dGV4
KTsNCj4gKw0KPiAgCWkgPSAwOw0KPiAgCWxlbiA9IGdzc19tZWNoX2xpc3RfcHNldWRvZmxhdm9y
cyhmbGF2b3JzKTsNCj4gIA0KPiBAQCAtMTYxMyw2ICsxNjk2LDUyIEBAIGFnYWluOg0KPiAgCQkJ
YnJlYWs7DQo+ICAJCX0NCj4gIAl9DQo+ICsNCj4gKwltdXRleF91bmxvY2soJm5mc19jbGlkX2lu
aXRfbXV0ZXgpOw0KPiArCXJldHVybiBzdGF0dXM7DQo+ICt9DQo+ICsNCj4gKy8qKg0KPiArICog
bmZzNF9kZXRlY3RfdHJ1bmtpbmcgLSBEZXRlY3Qgc2VydmVyIElQIGFkZHJlc3MgdHJ1bmtpbmcN
Cj4gKyAqDQo+ICsgKiBAY2xwOiBuZnNfY2xpZW50IHVuZGVyIHRlc3QNCj4gKyAqIEByZXN1bHQ6
IE9VVDogZm91bmQgbmZzX2NsaWVudCwgb3IgY2xwDQo+ICsgKg0KPiArICogUmV0dXJucyBORlM0
X09LLCBhIG5lZ2F0aXZlIGVycm5vLCBvciBhIG5lZ2F0aXZlIE5GUzRFUlIgc3RhdHVzLg0KPiAr
ICogSWYgTkZTNF9PSyBpcyByZXR1cm5lZCwgYW4gbmZzX2NsaWVudCBwb2ludGVyIGlzIHBsYW50
ZWQgaW4NCj4gKyAqICJyZXN1bHQiLg0KPiArICovDQo+ICtpbnQgbmZzNF9kZXRlY3RfdHJ1bmtp
bmcoc3RydWN0IG5mc19jbGllbnQgKmNscCwNCj4gKwkJCSBzdHJ1Y3QgbmZzX2NsaWVudCAqKnJl
c3VsdCkNCj4gK3sNCj4gKwljb25zdCBzdHJ1Y3QgbmZzNF9zdGF0ZV9yZWNvdmVyeV9vcHMgKm9w
cyA9DQo+ICsJCQkJY2xwLT5jbF9tdm9wcy0+cmVib290X3JlY292ZXJ5X29wczsNCj4gKwlzdHJ1
Y3QgcnBjX2NyZWQgKmNyZWQ7DQo+ICsJaW50IHN0YXR1czsNCj4gKw0KPiArCWRwcmludGsoIk5G
UzogPC0tICVzIG5mc19jbGllbnQgPSAlcFxuIiwgX19mdW5jX18sIGNscCk7DQo+ICsJbXV0ZXhf
bG9jaygmbmZzX2NsaWRfaW5pdF9tdXRleCk7DQo+ICsNCj4gKwlzdGF0dXMgPSAtRU5PRU5UOw0K
PiArCWNyZWQgPSBvcHMtPmdldF9jbGlkX2NyZWQoY2xwKTsNCj4gKwlpZiAoY3JlZCAhPSBOVUxM
KSB7DQo+ICsJCXN0YXR1cyA9IG9wcy0+ZGV0ZWN0X3RydW5raW5nKGNscCwgcmVzdWx0LCBjcmVk
KTsNCj4gKwkJcHV0X3JwY2NyZWQoY3JlZCk7DQo+ICsJCS8qIEhhbmRsZSBjYXNlIHdoZXJlIHRo
ZSB1c2VyIGhhc24ndCBzZXQgdXAgbWFjaGluZSBjcmVkcyAqLw0KPiArCQlpZiAoc3RhdHVzID09
IC1FQUNDRVMgJiYgY3JlZCA9PSBjbHAtPmNsX21hY2hpbmVfY3JlZCkgew0KPiArCQkJbmZzNF9j
bGVhcl9tYWNoaW5lX2NyZWQoY2xwKTsNCj4gKwkJCXN0YXR1cyA9IC1FQUdBSU47DQo+ICsJCX0N
Cj4gKwkJaWYgKHN0YXR1cyA9PSAtTkZTNEVSUl9NSU5PUl9WRVJTX01JU01BVENIKQ0KPiArCQkJ
c3RhdHVzID0gLUVQUk9UT05PU1VQUE9SVDsNCj4gKwl9DQo+ICsNCj4gKwltdXRleF91bmxvY2so
Jm5mc19jbGlkX2luaXRfbXV0ZXgpOw0KPiArCWlmIChzdGF0dXMgPT0gTkZTNF9PSykgew0KPiAr
CQljbGVhcl9iaXQoTkZTNENMTlRfTEVBU0VfRVhQSVJFRCwgJmNscC0+Y2xfc3RhdGUpOw0KPiAr
CQlkcHJpbnRrKCJORlM6IDwtLSAlcyByZXN1bHQgPSAlcFxuIiwgX19mdW5jX18sICpyZXN1bHQp
Ow0KPiArCX0gZWxzZQ0KPiArCQlkcHJpbnRrKCJORlM6IDwtLSAlcyBzdGF0dXMgPSAlZFxuIiwg
X19mdW5jX18sIHN0YXR1cyk7DQo+ICAJcmV0dXJuIHN0YXR1czsNCj4gIH0NCj4gIA0KPiANCg0K
LS0gDQpUcm9uZCBNeWtsZWJ1c3QNCkxpbnV4IE5GUyBjbGllbnQgbWFpbnRhaW5lcg0KDQpOZXRB
cHANClRyb25kLk15a2xlYnVzdEBuZXRhcHAuY29tDQp3d3cubmV0YXBwLmNvbQ0KDQo=

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 18/20] NFS: Detect NFSv4 server trunking when mounting
  2012-04-23 21:27   ` Myklebust, Trond
@ 2012-04-23 21:43     ` Chuck Lever
  2012-04-23 21:47     ` Chuck Lever
  1 sibling, 0 replies; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 21:43 UTC (permalink / raw)
  To: Myklebust, Trond; +Cc: linux-nfs


On Apr 23, 2012, at 5:27 PM, Myklebust, Trond wrote:

> On Mon, 2012-04-23 at 16:55 -0400, Chuck Lever wrote:
>> Currently the Linux NFS client waits to perform a SETCLIENTID until
>> just before an application wants to open a file.  Quite a bit of
>> activity can occur before any state is needed.
>> 
>> If the client cares about server trunking, however, no NFSv4
>> operations can proceed until the client determines who it is talking
>> to.  Thus server IP trunking detection must be done when the client
>> first encounters an unfamiliar server IP address.
>> 
>> The nfs_get_client() function walks the nfs_client_list and matches on
>> server IP address.  The outcome of that walk tells us immediately if
>> we have an unfamiliar server IP address.  It invokes an init_client()
>> method in this case.
>> 
>> Thus, nfs4_init_client() can establish a fresh client ID, and perform
>> trunking detection with it.  The exact process for detecting trunking
>> is different for NFSv4.0 and NFSv4.1, so a minorversion-specific
>> init_client callout is introduced.
>> 
>> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
>> ---
>> 
>> fs/nfs/client.c    |  223 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>> fs/nfs/internal.h  |    6 +
>> fs/nfs/nfs4_fs.h   |    7 ++
>> fs/nfs/nfs4proc.c  |    2 
>> fs/nfs/nfs4state.c |  131 ++++++++++++++++++++++++++++++-
>> 5 files changed, 367 insertions(+), 2 deletions(-)
>> 
>> diff --git a/fs/nfs/client.c b/fs/nfs/client.c
>> index 920abbc..7330673 100644
>> --- a/fs/nfs/client.c
>> +++ b/fs/nfs/client.c
>> @@ -566,7 +566,8 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
>> 			return nfs_found_client(cl_init, clp);
>> 		}
>> 		if (new) {
>> -			list_add(&new->cl_share_link, &nn->nfs_client_list);
>> +			list_add_tail(&new->cl_share_link,
>> +					&nn->nfs_client_list);
>> 			spin_unlock(&nn->nfs_client_lock);
>> 			new->cl_flags = cl_init->init_flags;
>> 			return cl_init->rpc_ops->init_client(new,
>> @@ -584,6 +585,210 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
>> 	return new;
>> }
>> 
>> +#ifdef CONFIG_NFS_V4
>> +/*
>> + * Returns true if the client IDs match
>> + */
>> +static bool
>> +nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
>> +{
>> +	if (a->cl_clientid != b->cl_clientid) {
>> +		dprintk("NFS: --> %s client ID %llx does not match %llx\n",
>> +			__func__, a->cl_clientid, b->cl_clientid);
>> +		return false;
>> +	}
>> +	dprintk("NFS: --> %s client ID %llx matches %llx\n",
>> +		__func__, a->cl_clientid, b->cl_clientid);
>> +	return true;
>> +}
>> +
>> +/**
>> + * nfs40_walk_client_list - Find server that recognizes a client ID
>> + *
>> + * @new: nfs_client with client ID to test
>> + * @result: OUT: found nfs_client, or new
>> + * @cred: credential to use for trunking test
>> + *
>> + * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status.
>> + * If NFS4_OK is returned, an nfs_client pointer is planted in "result."
>> + *
>> + * NB: nfs40_walk_client_list() relies on the new nfs_client being
>> + *     the last nfs_client on the list.
>> + */
>> +int nfs40_walk_client_list(struct nfs_client *new,
>> +			   struct nfs_client **result,
>> +			   struct rpc_cred *cred)
>> +{
>> +	struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
>> +	struct nfs_client *pos, *prev = NULL;
>> +	struct nfs4_setclientid_res clid = {
>> +		.clientid	= new->cl_clientid,
>> +		.confirm	= new->cl_confirm,
>> +	};
>> +	int status;
>> +
>> +	dprintk("NFS: --> %s nfs_client = %p\n", __func__, new);
>> +
>> +	spin_lock(&nn->nfs_client_lock);
>> +
>> +	list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
>> +		if (pos->cl_cons_state < 0)
>> +			continue;
>> +
>> +		if (pos->rpc_ops != new->rpc_ops)
>> +			continue;
>> +
>> +		if (pos->cl_proto != new->cl_proto)
>> +			continue;
>> +
>> +		if (pos->cl_minorversion != new->cl_minorversion)
>> +			continue;
>> +
>> +		dprintk("NFS: --> %s comparing %llx and %llx\n", __func__,
>> +			new->cl_clientid, pos->cl_clientid);
>> +		if (pos->cl_clientid != new->cl_clientid)
>> +			continue;
>> +
>> +		atomic_inc(&pos->cl_count);
>> +		dprintk("%s nfs_client = %p ({%d})\n",
>> +			__func__, pos, atomic_read(&pos->cl_count));
>> +		spin_unlock(&nn->nfs_client_lock);
>> +
>> +		dprintk("NFS: --> %s confirming %llx\n",
>> +			__func__, new->cl_clientid);
>> +
>> +		if (prev)
>> +			nfs_put_client(prev);
>> +
>> +		status = nfs4_proc_setclientid_confirm(pos, &clid, cred);
> 
> How are you protecting against NFS4CLNT_PURGE_STATE?

My design is that the clid_init_mutex is held by nfs4_reclaim_lease() and while we are in this code.  Whether that made it out of my head and into the source code is another question.

-- 
Chuck Lever
chuck[dot]lever[at]oracle[dot]com





^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 18/20] NFS: Detect NFSv4 server trunking when mounting
  2012-04-23 21:27   ` Myklebust, Trond
  2012-04-23 21:43     ` Chuck Lever
@ 2012-04-23 21:47     ` Chuck Lever
  2012-04-23 21:56       ` Myklebust, Trond
  1 sibling, 1 reply; 37+ messages in thread
From: Chuck Lever @ 2012-04-23 21:47 UTC (permalink / raw)
  To: Myklebust, Trond; +Cc: linux-nfs


On Apr 23, 2012, at 5:27 PM, Myklebust, Trond wrote:

> On Mon, 2012-04-23 at 16:55 -0400, Chuck Lever wrote:
>> diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
>> index 6a1a305..df59951 100644
>> --- a/fs/nfs/nfs4state.c
>> +++ b/fs/nfs/nfs4state.c
>> @@ -57,10 +57,12 @@
>> #include "internal.h"
>> #include "pnfs.h"
>> 
>> +#define NFSDBG_FACILITY		NFSDBG_CLIENT
> 
> Most of the stuff in nfs4state.c is dealing with NFSv4-specific state.
> It does not make sense to lump that in with the nfs_client debugging.
> 

It was the closest convenient.  Shall I create an NFSDBG_STATE?

>> +
>> #define OPENOWNER_POOL_SIZE	8
>> 
>> const nfs4_stateid zero_stateid;
>> -
>> +static DEFINE_MUTEX(nfs_clid_init_mutex);
>> static LIST_HEAD(nfs4_clientid_list);
>> 
>> int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)

-- 
Chuck Lever
chuck[dot]lever[at]oracle[dot]com





^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 18/20] NFS: Detect NFSv4 server trunking when mounting
  2012-04-23 21:47     ` Chuck Lever
@ 2012-04-23 21:56       ` Myklebust, Trond
  0 siblings, 0 replies; 37+ messages in thread
From: Myklebust, Trond @ 2012-04-23 21:56 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

T24gTW9uLCAyMDEyLTA0LTIzIGF0IDE3OjQ3IC0wNDAwLCBDaHVjayBMZXZlciB3cm90ZToNCj4g
T24gQXByIDIzLCAyMDEyLCBhdCA1OjI3IFBNLCBNeWtsZWJ1c3QsIFRyb25kIHdyb3RlOg0KPiAN
Cj4gPiBPbiBNb24sIDIwMTItMDQtMjMgYXQgMTY6NTUgLTA0MDAsIENodWNrIExldmVyIHdyb3Rl
Og0KPiA+PiBkaWZmIC0tZ2l0IGEvZnMvbmZzL25mczRzdGF0ZS5jIGIvZnMvbmZzL25mczRzdGF0
ZS5jDQo+ID4+IGluZGV4IDZhMWEzMDUuLmRmNTk5NTEgMTAwNjQ0DQo+ID4+IC0tLSBhL2ZzL25m
cy9uZnM0c3RhdGUuYw0KPiA+PiArKysgYi9mcy9uZnMvbmZzNHN0YXRlLmMNCj4gPj4gQEAgLTU3
LDEwICs1NywxMiBAQA0KPiA+PiAjaW5jbHVkZSAiaW50ZXJuYWwuaCINCj4gPj4gI2luY2x1ZGUg
InBuZnMuaCINCj4gPj4gDQo+ID4+ICsjZGVmaW5lIE5GU0RCR19GQUNJTElUWQkJTkZTREJHX0NM
SUVOVA0KPiA+IA0KPiA+IE1vc3Qgb2YgdGhlIHN0dWZmIGluIG5mczRzdGF0ZS5jIGlzIGRlYWxp
bmcgd2l0aCBORlN2NC1zcGVjaWZpYyBzdGF0ZS4NCj4gPiBJdCBkb2VzIG5vdCBtYWtlIHNlbnNl
IHRvIGx1bXAgdGhhdCBpbiB3aXRoIHRoZSBuZnNfY2xpZW50IGRlYnVnZ2luZy4NCj4gPiANCj4g
DQo+IEl0IHdhcyB0aGUgY2xvc2VzdCBjb252ZW5pZW50LiAgU2hhbGwgSSBjcmVhdGUgYW4gTkZT
REJHX1NUQVRFPw0KDQpZZXMuIFRoYXQgd291bGQgYmUgcHJlZmVyYWJsZS4uLg0KDQo+ID4+ICsN
Cj4gPj4gI2RlZmluZSBPUEVOT1dORVJfUE9PTF9TSVpFCTgNCj4gPj4gDQo+ID4+IGNvbnN0IG5m
czRfc3RhdGVpZCB6ZXJvX3N0YXRlaWQ7DQo+ID4+IC0NCj4gPj4gK3N0YXRpYyBERUZJTkVfTVVU
RVgobmZzX2NsaWRfaW5pdF9tdXRleCk7DQo+ID4+IHN0YXRpYyBMSVNUX0hFQUQobmZzNF9jbGll
bnRpZF9saXN0KTsNCj4gPj4gDQo+ID4+IGludCBuZnM0X2luaXRfY2xpZW50aWQoc3RydWN0IG5m
c19jbGllbnQgKmNscCwgc3RydWN0IHJwY19jcmVkICpjcmVkKQ0KPiANCg0KLS0gDQpUcm9uZCBN
eWtsZWJ1c3QNCkxpbnV4IE5GUyBjbGllbnQgbWFpbnRhaW5lcg0KDQpOZXRBcHANClRyb25kLk15
a2xlYnVzdEBuZXRhcHAuY29tDQp3d3cubmV0YXBwLmNvbQ0KDQo=

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE
  2012-04-23 20:55 ` [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE Chuck Lever
@ 2012-04-26 16:24   ` Chuck Lever
  2012-04-26 16:55     ` Myklebust, Trond
  0 siblings, 1 reply; 37+ messages in thread
From: Chuck Lever @ 2012-04-26 16:24 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: linux-nfs


On Apr 23, 2012, at 4:55 PM, Chuck Lever wrote:

> For NFSv4 minor version 0, currently the cl_id_uniquifier allows the
> Linux client to generate a unique nfs_client_id4 string whenever a
> server replies with NFS4ERR_CLID_INUSE.
> 
> NFS4ERR_CLID_INUSE actually means that the client has presented this
> nfs_client_id4 string with a different authentication flavor in the
> past.  Retrying with a different nfs_client_id4 string means the
> client orphans NFSv4 state on the server.  This state will take at
> least a whole lease period to be purged.
> 
> Change recovery to try the identification operation again with a
> different auth flavor until it works.  The retry loop is factored
> out of nfs4_proc_setclientid() and into the state manager, so that
> both mv0 and mv1 client ID establishment is covered by the same
> CLID_INUSE recovery logic.
> 
> XXX: On further review, I'm not sure how it would be possible to
> send an nfs_client_id4 with the wrong authentication flavor, since
> the au_name is part of the string itself...

I'm having other doubts about this whole approach.

In the loop in nfs4_reclaim_lease(), the client will need to replace the RPC transport for each retried flavor, and then continue using the transport that worked.  New mounts clone their transport from the nfs_client, even if its authentication flavor does not match what might have been specified on the mount.  (I haven't checked this, is it true?)

What's more, there's no way a server can identify a re-used nfs_client_id4, since we currently plant the authentication flavor in the nfs_client_id4 string…

In fact, because we generate nfs_client_id4 strings with the flavor built in, won't each flavor used on a mount generate a separate lease on the server?

Talk me down?

> 
> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> ---
> 
> fs/nfs/nfs4proc.c         |   75 ++++++++++++++++++++++++++++++---------------
> fs/nfs/nfs4state.c        |   37 ++++++++++++++++++----
> include/linux/nfs_fs_sb.h |    3 +-
> 3 files changed, 81 insertions(+), 34 deletions(-)
> 
> diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
> index 8bdc6fd..7ec1b68 100644
> --- a/fs/nfs/nfs4proc.c
> +++ b/fs/nfs/nfs4proc.c
> @@ -3890,6 +3890,37 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
> 	memcpy(bootverf->data, verf, sizeof(bootverf->data));
> }
> 
> +static unsigned int
> +nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
> +				   char *buf, size_t len)
> +{
> +	unsigned int result;
> +
> +	rcu_read_lock();
> +	result = scnprintf(buf, len, "%s/%s %s %s non-uniform",
> +				clp->cl_ipaddr,
> +				rpc_peeraddr2str(clp->cl_rpcclient,
> +							RPC_DISPLAY_ADDR),
> +				rpc_peeraddr2str(clp->cl_rpcclient,
> +							RPC_DISPLAY_PROTO),
> +				clp->cl_rpcclient->cl_auth->au_ops->au_name);
> +	rcu_read_unlock();
> +	return result;
> +}
> +
> +/**
> + * nfs4_proc_setclientid - Negotiate client ID
> + * @clp: state data structure
> + * @program: RPC program for NFSv4 callback service
> + * @port: IP port number for NFS4 callback service
> + * @cred: RPC credential to use for this call
> + * @res: where to place the result
> + *
> + * Returns zero or a negative NFS4ERR status code.
> + *
> + * A status of -NFS4ERR_CLID_INUSE means the caller should try
> + * again with a different authentication flavor.
> + */
> int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
> 		unsigned short port, struct rpc_cred *cred,
> 		struct nfs4_setclientid_res *res)
> @@ -3906,41 +3937,30 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
> 		.rpc_resp = res,
> 		.rpc_cred = cred,
> 	};
> -	int loop = 0;
> 	int status;
> 
> +	/* Client ID */
> 	nfs4_init_boot_verifier(clp, &sc_verifier);
> +	setclientid.sc_name_len = nfs4_init_nonuniform_client_string(clp,
> +						setclientid.sc_name,
> +						sizeof(setclientid.sc_name));
> 
> -	for(;;) {
> -		rcu_read_lock();
> -		setclientid.sc_name_len = scnprintf(setclientid.sc_name,
> -				sizeof(setclientid.sc_name), "%s/%s %s %s %u",
> -				clp->cl_ipaddr,
> -				rpc_peeraddr2str(clp->cl_rpcclient,
> -							RPC_DISPLAY_ADDR),
> -				rpc_peeraddr2str(clp->cl_rpcclient,
> -							RPC_DISPLAY_PROTO),
> -				clp->cl_rpcclient->cl_auth->au_ops->au_name,
> -				clp->cl_id_uniquifier);
> -		setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
> +	/* Callback info */
> +	rcu_read_lock();
> +	setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
> 				sizeof(setclientid.sc_netid),
> 				rpc_peeraddr2str(clp->cl_rpcclient,
> 							RPC_DISPLAY_NETID));
> -		setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
> +	rcu_read_unlock();
> +	setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
> 				sizeof(setclientid.sc_uaddr), "%s.%u.%u",
> 				clp->cl_ipaddr, port >> 8, port & 255);
> -		rcu_read_unlock();
> 
> -		status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
> -		if (status != -NFS4ERR_CLID_INUSE)
> -			break;
> -		if (loop != 0) {
> -			++clp->cl_id_uniquifier;
> -			break;
> -		}
> -		++loop;
> -		ssleep(clp->cl_lease_time / HZ + 1);
> -	}
> +	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
> +
> +	dprintk("%s: nfs_client_id4 '%.*s' (status %d)\n",
> +		__func__, setclientid.sc_name_len, setclientid.sc_name,
> +		status);
> 	return status;
> }
> 
> @@ -5008,6 +5028,11 @@ nfs41_same_server_scope(struct nfs41_server_scope *a,
> /*
>  * nfs4_proc_exchange_id()
>  *
> + * Returns zero or a negative NFS4ERR status code.
> + *
> + * A status of -NFS4ERR_CLID_INUSE means the caller should try
> + * again with a different authentication flavor.
> + *
>  * Since the clientid has expired, all compounds using sessions
>  * associated with the stale clientid will be returning
>  * NFS4ERR_BADSESSION in the sequence operation, and will therefore
> diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
> index 7f56502..6a1a305 100644
> --- a/fs/nfs/nfs4state.c
> +++ b/fs/nfs/nfs4state.c
> @@ -1576,19 +1576,42 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
> 	struct rpc_cred *cred;
> 	const struct nfs4_state_recovery_ops *ops =
> 		clp->cl_mvops->reboot_recovery_ops;
> -	int status = -ENOENT;
> +	rpc_authflavor_t flavors[NFS_MAX_SECFLAVORS];
> +	int i, len, status;
> 
> +	i = 0;
> +	len = gss_mech_list_pseudoflavors(flavors);
> +
> +again:
> +	status = -ENOENT;
> 	cred = ops->get_clid_cred(clp);
> 	if (cred != NULL) {
> 		status = ops->establish_clid(clp, cred);
> 		put_rpccred(cred);
> -		/* Handle case where the user hasn't set up machine creds */
> -		if (status == -EACCES && cred == clp->cl_machine_cred) {
> -			nfs4_clear_machine_cred(clp);
> -			status = -EAGAIN;
> -		}
> -		if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
> +		switch (status) {
> +		case 0:
> +			break;
> +		case -EACCES:	/* the user hasn't set up machine creds */
> +			if (cred == clp->cl_machine_cred) {
> +				nfs4_clear_machine_cred(clp);
> +				status = -EAGAIN;
> +			}
> +			break;
> +		case -NFS4ERR_CLID_INUSE:
> +		case -NFS4ERR_WRONGSEC:
> +			/*
> +			 * XXX: "flavors" is unordered; the client should
> +			 *	prefer krb5p for this transport
> +			 */
> +			if (i < len && rpcauth_create(flavors[i++],
> +						clp->cl_rpcclient) != NULL)
> +				goto again;
> +			status = -EPERM;
> +			break;
> +		case -NFS4ERR_MINOR_VERS_MISMATCH:
> 			status = -EPROTONOSUPPORT;
> +			break;
> +		}
> 	}
> 	return status;
> }
> diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
> index b246582..1c4c174 100644
> --- a/include/linux/nfs_fs_sb.h
> +++ b/include/linux/nfs_fs_sb.h
> @@ -65,10 +65,9 @@ struct nfs_client {
> 	struct idmap *		cl_idmap;
> 
> 	/* Our own IP address, as a null-terminated string.
> -	 * This is used to generate the clientid, and the callback address.
> +	 * This is used to generate the mv0 callback address.
> 	 */
> 	char			cl_ipaddr[48];
> -	unsigned char		cl_id_uniquifier;
> 	u32			cl_cb_ident;	/* v4.0 callback identifier */
> 	const struct nfs4_minor_version_ops *cl_mvops;
> 
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

-- 
Chuck Lever
chuck[dot]lever[at]oracle[dot]com





^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE
  2012-04-26 16:24   ` Chuck Lever
@ 2012-04-26 16:55     ` Myklebust, Trond
  2012-04-26 18:43       ` Chuck Lever
  0 siblings, 1 reply; 37+ messages in thread
From: Myklebust, Trond @ 2012-04-26 16:55 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

T24gVGh1LCAyMDEyLTA0LTI2IGF0IDEyOjI0IC0wNDAwLCBDaHVjayBMZXZlciB3cm90ZToNCj4g
T24gQXByIDIzLCAyMDEyLCBhdCA0OjU1IFBNLCBDaHVjayBMZXZlciB3cm90ZToNCj4gDQo+ID4g
Rm9yIE5GU3Y0IG1pbm9yIHZlcnNpb24gMCwgY3VycmVudGx5IHRoZSBjbF9pZF91bmlxdWlmaWVy
IGFsbG93cyB0aGUNCj4gPiBMaW51eCBjbGllbnQgdG8gZ2VuZXJhdGUgYSB1bmlxdWUgbmZzX2Ns
aWVudF9pZDQgc3RyaW5nIHdoZW5ldmVyIGENCj4gPiBzZXJ2ZXIgcmVwbGllcyB3aXRoIE5GUzRF
UlJfQ0xJRF9JTlVTRS4NCj4gPiANCj4gPiBORlM0RVJSX0NMSURfSU5VU0UgYWN0dWFsbHkgbWVh
bnMgdGhhdCB0aGUgY2xpZW50IGhhcyBwcmVzZW50ZWQgdGhpcw0KPiA+IG5mc19jbGllbnRfaWQ0
IHN0cmluZyB3aXRoIGEgZGlmZmVyZW50IGF1dGhlbnRpY2F0aW9uIGZsYXZvciBpbiB0aGUNCj4g
PiBwYXN0LiAgUmV0cnlpbmcgd2l0aCBhIGRpZmZlcmVudCBuZnNfY2xpZW50X2lkNCBzdHJpbmcg
bWVhbnMgdGhlDQo+ID4gY2xpZW50IG9ycGhhbnMgTkZTdjQgc3RhdGUgb24gdGhlIHNlcnZlci4g
IFRoaXMgc3RhdGUgd2lsbCB0YWtlIGF0DQo+ID4gbGVhc3QgYSB3aG9sZSBsZWFzZSBwZXJpb2Qg
dG8gYmUgcHVyZ2VkLg0KPiA+IA0KPiA+IENoYW5nZSByZWNvdmVyeSB0byB0cnkgdGhlIGlkZW50
aWZpY2F0aW9uIG9wZXJhdGlvbiBhZ2FpbiB3aXRoIGENCj4gPiBkaWZmZXJlbnQgYXV0aCBmbGF2
b3IgdW50aWwgaXQgd29ya3MuICBUaGUgcmV0cnkgbG9vcCBpcyBmYWN0b3JlZA0KPiA+IG91dCBv
ZiBuZnM0X3Byb2Nfc2V0Y2xpZW50aWQoKSBhbmQgaW50byB0aGUgc3RhdGUgbWFuYWdlciwgc28g
dGhhdA0KPiA+IGJvdGggbXYwIGFuZCBtdjEgY2xpZW50IElEIGVzdGFibGlzaG1lbnQgaXMgY292
ZXJlZCBieSB0aGUgc2FtZQ0KPiA+IENMSURfSU5VU0UgcmVjb3ZlcnkgbG9naWMuDQo+ID4gDQo+
ID4gWFhYOiBPbiBmdXJ0aGVyIHJldmlldywgSSdtIG5vdCBzdXJlIGhvdyBpdCB3b3VsZCBiZSBw
b3NzaWJsZSB0bw0KPiA+IHNlbmQgYW4gbmZzX2NsaWVudF9pZDQgd2l0aCB0aGUgd3JvbmcgYXV0
aGVudGljYXRpb24gZmxhdm9yLCBzaW5jZQ0KPiA+IHRoZSBhdV9uYW1lIGlzIHBhcnQgb2YgdGhl
IHN0cmluZyBpdHNlbGYuLi4NCj4gDQo+IEknbSBoYXZpbmcgb3RoZXIgZG91YnRzIGFib3V0IHRo
aXMgd2hvbGUgYXBwcm9hY2guDQo+IA0KPiBJbiB0aGUgbG9vcCBpbiBuZnM0X3JlY2xhaW1fbGVh
c2UoKSwgdGhlIGNsaWVudCB3aWxsIG5lZWQgdG8gcmVwbGFjZSB0aGUgUlBDIHRyYW5zcG9ydCBm
b3IgZWFjaCByZXRyaWVkIGZsYXZvciwgYW5kIHRoZW4gY29udGludWUgdXNpbmcgdGhlIHRyYW5z
cG9ydCB0aGF0IHdvcmtlZC4gIE5ldyBtb3VudHMgY2xvbmUgdGhlaXIgdHJhbnNwb3J0IGZyb20g
dGhlIG5mc19jbGllbnQsIGV2ZW4gaWYgaXRzIGF1dGhlbnRpY2F0aW9uIGZsYXZvciBkb2VzIG5v
dCBtYXRjaCB3aGF0IG1pZ2h0IGhhdmUgYmVlbiBzcGVjaWZpZWQgb24gdGhlIG1vdW50LiAgKEkg
aGF2ZW4ndCBjaGVja2VkIHRoaXMsIGlzIGl0IHRydWU/KQ0KPiANCj4gV2hhdCdzIG1vcmUsIHRo
ZXJlJ3Mgbm8gd2F5IGEgc2VydmVyIGNhbiBpZGVudGlmeSBhIHJlLXVzZWQgbmZzX2NsaWVudF9p
ZDQsIHNpbmNlIHdlIGN1cnJlbnRseSBwbGFudCB0aGUgYXV0aGVudGljYXRpb24gZmxhdm9yIGlu
IHRoZSBuZnNfY2xpZW50X2lkNCBzdHJpbmfigKYNCj4gDQo+IEluIGZhY3QsIGJlY2F1c2Ugd2Ug
Z2VuZXJhdGUgbmZzX2NsaWVudF9pZDQgc3RyaW5ncyB3aXRoIHRoZSBmbGF2b3IgYnVpbHQgaW4s
IHdvbid0IGVhY2ggZmxhdm9yIHVzZWQgb24gYSBtb3VudCBnZW5lcmF0ZSBhIHNlcGFyYXRlIGxl
YXNlIG9uIHRoZSBzZXJ2ZXI/DQoNClRoZW4gbGV0cyBtb3ZlIHRoZSBmbGF2b3VyIG91dCBvZiB0
aGUgY2xpZW50aWQgc3RyaW5nLCBhbmQganVzdCBzZXR0bGUNCmZvciBoYW5kbGluZyBDTElEX0lO
VVNFIGJ5IGNoYW5naW5nIHRoZSBmbGF2b3VyIG9uIHRoZSBTRVRDTElFTlRJRCBjYWxsLg0KDQpD
aGVlcnMNCiAgVHJvbmQNCg0KLS0gDQpUcm9uZCBNeWtsZWJ1c3QNCkxpbnV4IE5GUyBjbGllbnQg
bWFpbnRhaW5lcg0KDQpOZXRBcHANClRyb25kLk15a2xlYnVzdEBuZXRhcHAuY29tDQp3d3cubmV0
YXBwLmNvbQ0KDQo=

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE
  2012-04-26 16:55     ` Myklebust, Trond
@ 2012-04-26 18:43       ` Chuck Lever
  2012-04-26 18:53         ` Myklebust, Trond
  0 siblings, 1 reply; 37+ messages in thread
From: Chuck Lever @ 2012-04-26 18:43 UTC (permalink / raw)
  To: Myklebust, Trond; +Cc: linux-nfs


On Apr 26, 2012, at 12:55 PM, Myklebust, Trond wrote:

> On Thu, 2012-04-26 at 12:24 -0400, Chuck Lever wrote:
>> On Apr 23, 2012, at 4:55 PM, Chuck Lever wrote:
>> 
>>> For NFSv4 minor version 0, currently the cl_id_uniquifier allows the
>>> Linux client to generate a unique nfs_client_id4 string whenever a
>>> server replies with NFS4ERR_CLID_INUSE.
>>> 
>>> NFS4ERR_CLID_INUSE actually means that the client has presented this
>>> nfs_client_id4 string with a different authentication flavor in the
>>> past.  Retrying with a different nfs_client_id4 string means the
>>> client orphans NFSv4 state on the server.  This state will take at
>>> least a whole lease period to be purged.
>>> 
>>> Change recovery to try the identification operation again with a
>>> different auth flavor until it works.  The retry loop is factored
>>> out of nfs4_proc_setclientid() and into the state manager, so that
>>> both mv0 and mv1 client ID establishment is covered by the same
>>> CLID_INUSE recovery logic.
>>> 
>>> XXX: On further review, I'm not sure how it would be possible to
>>> send an nfs_client_id4 with the wrong authentication flavor, since
>>> the au_name is part of the string itself...
>> 
>> I'm having other doubts about this whole approach.
>> 
>> In the loop in nfs4_reclaim_lease(), the client will need to replace the RPC transport for each retried flavor, and then continue using the transport that worked.  New mounts clone their transport from the nfs_client, even if its authentication flavor does not match what might have been specified on the mount.  (I haven't checked this, is it true?)

It looks like nfs_init_server_rpcclient() changes the flavor of the RPC transport that was cloned from cl_rpcclient, so that shouldn't be a problem.

>> What's more, there's no way a server can identify a re-used nfs_client_id4, since we currently plant the authentication flavor in the nfs_client_id4 string…
>> 
>> In fact, because we generate nfs_client_id4 strings with the flavor built in, won't each flavor used on a mount generate a separate lease on the server?
> 
> Then lets move the flavour out of the clientid string,

Removing the flavor from the nfs_client_id4 string makes sense.

> and just settle
> for handling CLID_INUSE by changing the flavour on the SETCLIENTID call.

This is where I get hazy.  

If I simply change the authentication flavor on the existing clp->cl_rpcclient, will this affect ongoing RENEW operations that also use this transport?  Do we want subsequent RENEW operations to use the new flavor?

Thinking hypothetically, it seems to me that CLID_INUSE is really an indication of a permanent configuration error, or a software bug, and we should not bother to recover.  But maybe that's my limited imagination.  Under what use cases do you think CLID_INUSE might occur and it might be useful to attempt recovery?

-- 
Chuck Lever
chuck[dot]lever[at]oracle[dot]com





^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE
  2012-04-26 18:43       ` Chuck Lever
@ 2012-04-26 18:53         ` Myklebust, Trond
  2012-04-26 18:57           ` Myklebust, Trond
  2012-04-26 19:04           ` Chuck Lever
  0 siblings, 2 replies; 37+ messages in thread
From: Myklebust, Trond @ 2012-04-26 18:53 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

T24gVGh1LCAyMDEyLTA0LTI2IGF0IDE0OjQzIC0wNDAwLCBDaHVjayBMZXZlciB3cm90ZToNCj4g
T24gQXByIDI2LCAyMDEyLCBhdCAxMjo1NSBQTSwgTXlrbGVidXN0LCBUcm9uZCB3cm90ZToNCj4g
DQo+ID4gT24gVGh1LCAyMDEyLTA0LTI2IGF0IDEyOjI0IC0wNDAwLCBDaHVjayBMZXZlciB3cm90
ZToNCj4gPj4gT24gQXByIDIzLCAyMDEyLCBhdCA0OjU1IFBNLCBDaHVjayBMZXZlciB3cm90ZToN
Cj4gPiBUaGVuIGxldHMgbW92ZSB0aGUgZmxhdm91ciBvdXQgb2YgdGhlIGNsaWVudGlkIHN0cmlu
ZywNCj4gDQo+IFJlbW92aW5nIHRoZSBmbGF2b3IgZnJvbSB0aGUgbmZzX2NsaWVudF9pZDQgc3Ry
aW5nIG1ha2VzIHNlbnNlLg0KPiANCj4gPiBhbmQganVzdCBzZXR0bGUNCj4gPiBmb3IgaGFuZGxp
bmcgQ0xJRF9JTlVTRSBieSBjaGFuZ2luZyB0aGUgZmxhdm91ciBvbiB0aGUgU0VUQ0xJRU5USUQg
Y2FsbC4NCj4gDQo+IFRoaXMgaXMgd2hlcmUgSSBnZXQgaGF6eS4gIA0KPiANCj4gSWYgSSBzaW1w
bHkgY2hhbmdlIHRoZSBhdXRoZW50aWNhdGlvbiBmbGF2b3Igb24gdGhlIGV4aXN0aW5nIGNscC0+
Y2xfcnBjY2xpZW50LCB3aWxsIHRoaXMgYWZmZWN0IG9uZ29pbmcgUkVORVcgb3BlcmF0aW9ucyB0
aGF0IGFsc28gdXNlIHRoaXMgdHJhbnNwb3J0PyAgRG8gd2Ugd2FudCBzdWJzZXF1ZW50IFJFTkVX
IG9wZXJhdGlvbnMgdG8gdXNlIHRoZSBuZXcgZmxhdm9yPw0KPiANCj4gVGhpbmtpbmcgaHlwb3Ro
ZXRpY2FsbHksIGl0IHNlZW1zIHRvIG1lIHRoYXQgQ0xJRF9JTlVTRSBpcyByZWFsbHkgYW4gaW5k
aWNhdGlvbiBvZiBhIHBlcm1hbmVudCBjb25maWd1cmF0aW9uIGVycm9yLCBvciBhIHNvZnR3YXJl
IGJ1ZywgYW5kIHdlIHNob3VsZCBub3QgYm90aGVyIHRvIHJlY292ZXIuICBCdXQgbWF5YmUgdGhh
dCdzIG15IGxpbWl0ZWQgaW1hZ2luYXRpb24uICBVbmRlciB3aGF0IHVzZSBjYXNlcyBkbyB5b3Ug
dGhpbmsgQ0xJRF9JTlVTRSBtaWdodCBvY2N1ciBhbmQgaXQgbWlnaHQgYmUgdXNlZnVsIHRvIGF0
dGVtcHQgcmVjb3Zlcnk/DQo+IA0KDQpUaGUgc2VydmVyIGNhY2hlcyB0aGUgcHJpbmNpcGFsIG5h
bWUgdGhhdCB3YXMgdXNlZCB0byBjYWxsIFNFVENMSUVOVElEDQp3aGVuIHRoZSBsZWFzZSB3YXMg
ZXN0YWJsaXNoZWQuIEFueSBhdHRlbXB0IHRvIGNhbGwgU0VUQ0xJRU5USUQgd2l0aCBhDQpkaWZm
ZXJlbnQgcHJpbmNpcGFsIHdpbGwgcmVzdWx0IGluIENMSURfSU5VU0UgdW5sZXNzIHRoZSBsZWFz
ZSBoYXMNCmV4cGlyZWQuDQoNClNvIHdoYXQgSSB3YXMgcHJvcG9zaW5nIHdhc24ndCB0aGF0IHlv
dSB0cnkgdG8gY2hhbmdlIHRoZSBhdXRoZW50aWNhdGlvbg0KZmxhdm91ciBvbiBhbiBleGlzdGlu
ZyBuZnNfY2xpZW50LiBJdCB3YXMgdGhhdCB3aGVuIHlvdSBhcmUgcHJvYmluZywgeW91DQpjYW4g
dXNlIHRoZSBDTElEX0lOVVNFIHJlcGx5IGZyb20gU0VUQ0xJRU5USUQgYXMgYSBkaXJlY3QgaW5k
aWNhdGlvbg0KdGhhdCB0aGUgc2VydmVyIGlzIGluZGVlZCB0cnVua2VkLCBhbmQgdGhhdCB5b3Ug
YWxyZWFkeSBob2xkIGEgbGVhc2Ugb24NCnRoYXQgc2VydmVyLCBidXQgdGhhdCB0aGUgYXV0aGVu
dGljYXRpb24gZmxhdm91ciB0aGF0IHlvdSBhcmUgdHJ5aW5nIHRvDQp1c2UgaXMgd3JvbmcuDQoN
Ci0tIA0KVHJvbmQgTXlrbGVidXN0DQpMaW51eCBORlMgY2xpZW50IG1haW50YWluZXINCg0KTmV0
QXBwDQpUcm9uZC5NeWtsZWJ1c3RAbmV0YXBwLmNvbQ0Kd3d3Lm5ldGFwcC5jb20NCg0K

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE
  2012-04-26 18:53         ` Myklebust, Trond
@ 2012-04-26 18:57           ` Myklebust, Trond
  2012-04-26 19:04           ` Chuck Lever
  1 sibling, 0 replies; 37+ messages in thread
From: Myklebust, Trond @ 2012-04-26 18:57 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

T24gVGh1LCAyMDEyLTA0LTI2IGF0IDE0OjUzIC0wNDAwLCBUcm9uZCBNeWtsZWJ1c3Qgd3JvdGU6
DQo+IE9uIFRodSwgMjAxMi0wNC0yNiBhdCAxNDo0MyAtMDQwMCwgQ2h1Y2sgTGV2ZXIgd3JvdGU6
DQo+ID4gT24gQXByIDI2LCAyMDEyLCBhdCAxMjo1NSBQTSwgTXlrbGVidXN0LCBUcm9uZCB3cm90
ZToNCj4gPiANCj4gPiA+IE9uIFRodSwgMjAxMi0wNC0yNiBhdCAxMjoyNCAtMDQwMCwgQ2h1Y2sg
TGV2ZXIgd3JvdGU6DQo+ID4gPj4gT24gQXByIDIzLCAyMDEyLCBhdCA0OjU1IFBNLCBDaHVjayBM
ZXZlciB3cm90ZToNCj4gPiA+IFRoZW4gbGV0cyBtb3ZlIHRoZSBmbGF2b3VyIG91dCBvZiB0aGUg
Y2xpZW50aWQgc3RyaW5nLA0KPiA+IA0KPiA+IFJlbW92aW5nIHRoZSBmbGF2b3IgZnJvbSB0aGUg
bmZzX2NsaWVudF9pZDQgc3RyaW5nIG1ha2VzIHNlbnNlLg0KPiA+IA0KPiA+ID4gYW5kIGp1c3Qg
c2V0dGxlDQo+ID4gPiBmb3IgaGFuZGxpbmcgQ0xJRF9JTlVTRSBieSBjaGFuZ2luZyB0aGUgZmxh
dm91ciBvbiB0aGUgU0VUQ0xJRU5USUQgY2FsbC4NCj4gPiANCj4gPiBUaGlzIGlzIHdoZXJlIEkg
Z2V0IGhhenkuICANCj4gPiANCj4gPiBJZiBJIHNpbXBseSBjaGFuZ2UgdGhlIGF1dGhlbnRpY2F0
aW9uIGZsYXZvciBvbiB0aGUgZXhpc3RpbmcgY2xwLT5jbF9ycGNjbGllbnQsIHdpbGwgdGhpcyBh
ZmZlY3Qgb25nb2luZyBSRU5FVyBvcGVyYXRpb25zIHRoYXQgYWxzbyB1c2UgdGhpcyB0cmFuc3Bv
cnQ/ICBEbyB3ZSB3YW50IHN1YnNlcXVlbnQgUkVORVcgb3BlcmF0aW9ucyB0byB1c2UgdGhlIG5l
dyBmbGF2b3I/DQo+ID4gDQo+ID4gVGhpbmtpbmcgaHlwb3RoZXRpY2FsbHksIGl0IHNlZW1zIHRv
IG1lIHRoYXQgQ0xJRF9JTlVTRSBpcyByZWFsbHkgYW4gaW5kaWNhdGlvbiBvZiBhIHBlcm1hbmVu
dCBjb25maWd1cmF0aW9uIGVycm9yLCBvciBhIHNvZnR3YXJlIGJ1ZywgYW5kIHdlIHNob3VsZCBu
b3QgYm90aGVyIHRvIHJlY292ZXIuICBCdXQgbWF5YmUgdGhhdCdzIG15IGxpbWl0ZWQgaW1hZ2lu
YXRpb24uICBVbmRlciB3aGF0IHVzZSBjYXNlcyBkbyB5b3UgdGhpbmsgQ0xJRF9JTlVTRSBtaWdo
dCBvY2N1ciBhbmQgaXQgbWlnaHQgYmUgdXNlZnVsIHRvIGF0dGVtcHQgcmVjb3Zlcnk/DQo+ID4g
DQo+IA0KPiBUaGUgc2VydmVyIGNhY2hlcyB0aGUgcHJpbmNpcGFsIG5hbWUgdGhhdCB3YXMgdXNl
ZCB0byBjYWxsIFNFVENMSUVOVElEDQo+IHdoZW4gdGhlIGxlYXNlIHdhcyBlc3RhYmxpc2hlZC4g
QW55IGF0dGVtcHQgdG8gY2FsbCBTRVRDTElFTlRJRCB3aXRoIGENCj4gZGlmZmVyZW50IHByaW5j
aXBhbCB3aWxsIHJlc3VsdCBpbiBDTElEX0lOVVNFIHVubGVzcyB0aGUgbGVhc2UgaGFzDQo+IGV4
cGlyZWQuDQo+IA0KPiBTbyB3aGF0IEkgd2FzIHByb3Bvc2luZyB3YXNuJ3QgdGhhdCB5b3UgdHJ5
IHRvIGNoYW5nZSB0aGUgYXV0aGVudGljYXRpb24NCj4gZmxhdm91ciBvbiBhbiBleGlzdGluZyBu
ZnNfY2xpZW50LiBJdCB3YXMgdGhhdCB3aGVuIHlvdSBhcmUgcHJvYmluZywgeW91DQo+IGNhbiB1
c2UgdGhlIENMSURfSU5VU0UgcmVwbHkgZnJvbSBTRVRDTElFTlRJRCBhcyBhIGRpcmVjdCBpbmRp
Y2F0aW9uDQo+IHRoYXQgdGhlIHNlcnZlciBpcyBpbmRlZWQgdHJ1bmtlZCwgYW5kIHRoYXQgeW91
IGFscmVhZHkgaG9sZCBhIGxlYXNlIG9uDQo+IHRoYXQgc2VydmVyLCBidXQgdGhhdCB0aGUgYXV0
aGVudGljYXRpb24gZmxhdm91ciB0aGF0IHlvdSBhcmUgdHJ5aW5nIHRvDQo+IHVzZSBpcyB3cm9u
Zy4NCg0KQWN0dWFsbHksIGxldCBtZSBxdWFsaWZ5IHRoYXQgYSBiaXQuIENMSURfSU5VU0UgY2Fu
IGFsc28gbWVhbiBvbmUgb3RoZXINCnRoaW5nOiB0aGF0IHlvdSBoYXZlIHByZXZpb3VzbHkgZXN0
YWJsaXNoZWQgYSBsZWFzZSBvbiB0aGF0IHNlcnZlciwgd2l0aA0KYSBkaWZmZXJlbnQgYXV0aGVu
dGljYXRpb24gZmxhdm91ciwgYW5kIHRoYXQgbGVhc2UgaGFzIG5vdCB5ZXQgZXhwaXJlZA0KKGV2
ZW4gdGhvdWdoIHlvdXIgY2xpZW50IG1heSBoYXZlIGZvcmdvdHRlbiBpdCBkdWUgdG8gYSB1bW91
bnQgb2YgYWxsDQpmaWxlc3lzdGVtcyBmcm9tIHRoYXQgc2VydmVyKS4NCg0KLS0gDQpUcm9uZCBN
eWtsZWJ1c3QNCkxpbnV4IE5GUyBjbGllbnQgbWFpbnRhaW5lcg0KDQpOZXRBcHANClRyb25kLk15
a2xlYnVzdEBuZXRhcHAuY29tDQp3d3cubmV0YXBwLmNvbQ0KDQo=

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE
  2012-04-26 18:53         ` Myklebust, Trond
  2012-04-26 18:57           ` Myklebust, Trond
@ 2012-04-26 19:04           ` Chuck Lever
  2012-04-26 19:14             ` Myklebust, Trond
  1 sibling, 1 reply; 37+ messages in thread
From: Chuck Lever @ 2012-04-26 19:04 UTC (permalink / raw)
  To: Myklebust, Trond; +Cc: linux-nfs


On Apr 26, 2012, at 2:53 PM, Myklebust, Trond wrote:

> On Thu, 2012-04-26 at 14:43 -0400, Chuck Lever wrote:
>> On Apr 26, 2012, at 12:55 PM, Myklebust, Trond wrote:
>> 
>>> On Thu, 2012-04-26 at 12:24 -0400, Chuck Lever wrote:
>>>> On Apr 23, 2012, at 4:55 PM, Chuck Lever wrote:
>>> Then lets move the flavour out of the clientid string,
>> 
>> Removing the flavor from the nfs_client_id4 string makes sense.
>> 
>>> and just settle
>>> for handling CLID_INUSE by changing the flavour on the SETCLIENTID call.
>> 
>> This is where I get hazy.  
>> 
>> If I simply change the authentication flavor on the existing clp->cl_rpcclient, will this affect ongoing RENEW operations that also use this transport?  Do we want subsequent RENEW operations to use the new flavor?
>> 
>> Thinking hypothetically, it seems to me that CLID_INUSE is really an indication of a permanent configuration error, or a software bug, and we should not bother to recover.  But maybe that's my limited imagination.  Under what use cases do you think CLID_INUSE might occur and it might be useful to attempt recovery?
>> 
> 
> The server caches the principal name that was used to call SETCLIENTID
> when the lease was established. Any attempt to call SETCLIENTID with a
> different principal will result in CLID_INUSE unless the lease has
> expired.
> 
> So what I was proposing wasn't that you try to change the authentication
> flavour on an existing nfs_client. It was that when you are probing, you
> can use the CLID_INUSE reply from SETCLIENTID as a direct indication
> that the server is indeed trunked, and that you already hold a lease on
> that server, but that the authentication flavour that you are trying to
> use is wrong.

The use case would be that my client has mounted a server via address X using authentication flavor 1, and then tries to mount the same server via address Y using authentication flavor 2.

Do we even need to retry the SETCLIENTID and to perform a SETCLIENTID_CONFIRM in that case?

Now, what about nfs4_reclaim_lease() ?  If the client sees CLID_INUSE during a lease reclaim, no trunking discovery is involved.

-- 
Chuck Lever
chuck[dot]lever[at]oracle[dot]com





^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE
  2012-04-26 19:04           ` Chuck Lever
@ 2012-04-26 19:14             ` Myklebust, Trond
  2012-04-26 19:46               ` Chuck Lever
  0 siblings, 1 reply; 37+ messages in thread
From: Myklebust, Trond @ 2012-04-26 19:14 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

T24gVGh1LCAyMDEyLTA0LTI2IGF0IDE1OjA0IC0wNDAwLCBDaHVjayBMZXZlciB3cm90ZToNCj4g
T24gQXByIDI2LCAyMDEyLCBhdCAyOjUzIFBNLCBNeWtsZWJ1c3QsIFRyb25kIHdyb3RlOg0KPiAN
Cj4gPiBPbiBUaHUsIDIwMTItMDQtMjYgYXQgMTQ6NDMgLTA0MDAsIENodWNrIExldmVyIHdyb3Rl
Og0KPiA+PiBPbiBBcHIgMjYsIDIwMTIsIGF0IDEyOjU1IFBNLCBNeWtsZWJ1c3QsIFRyb25kIHdy
b3RlOg0KPiA+PiANCj4gPj4+IE9uIFRodSwgMjAxMi0wNC0yNiBhdCAxMjoyNCAtMDQwMCwgQ2h1
Y2sgTGV2ZXIgd3JvdGU6DQo+ID4+Pj4gT24gQXByIDIzLCAyMDEyLCBhdCA0OjU1IFBNLCBDaHVj
ayBMZXZlciB3cm90ZToNCj4gPj4+IFRoZW4gbGV0cyBtb3ZlIHRoZSBmbGF2b3VyIG91dCBvZiB0
aGUgY2xpZW50aWQgc3RyaW5nLA0KPiA+PiANCj4gPj4gUmVtb3ZpbmcgdGhlIGZsYXZvciBmcm9t
IHRoZSBuZnNfY2xpZW50X2lkNCBzdHJpbmcgbWFrZXMgc2Vuc2UuDQo+ID4+IA0KPiA+Pj4gYW5k
IGp1c3Qgc2V0dGxlDQo+ID4+PiBmb3IgaGFuZGxpbmcgQ0xJRF9JTlVTRSBieSBjaGFuZ2luZyB0
aGUgZmxhdm91ciBvbiB0aGUgU0VUQ0xJRU5USUQgY2FsbC4NCj4gPj4gDQo+ID4+IFRoaXMgaXMg
d2hlcmUgSSBnZXQgaGF6eS4gIA0KPiA+PiANCj4gPj4gSWYgSSBzaW1wbHkgY2hhbmdlIHRoZSBh
dXRoZW50aWNhdGlvbiBmbGF2b3Igb24gdGhlIGV4aXN0aW5nIGNscC0+Y2xfcnBjY2xpZW50LCB3
aWxsIHRoaXMgYWZmZWN0IG9uZ29pbmcgUkVORVcgb3BlcmF0aW9ucyB0aGF0IGFsc28gdXNlIHRo
aXMgdHJhbnNwb3J0PyAgRG8gd2Ugd2FudCBzdWJzZXF1ZW50IFJFTkVXIG9wZXJhdGlvbnMgdG8g
dXNlIHRoZSBuZXcgZmxhdm9yPw0KPiA+PiANCj4gPj4gVGhpbmtpbmcgaHlwb3RoZXRpY2FsbHks
IGl0IHNlZW1zIHRvIG1lIHRoYXQgQ0xJRF9JTlVTRSBpcyByZWFsbHkgYW4gaW5kaWNhdGlvbiBv
ZiBhIHBlcm1hbmVudCBjb25maWd1cmF0aW9uIGVycm9yLCBvciBhIHNvZnR3YXJlIGJ1ZywgYW5k
IHdlIHNob3VsZCBub3QgYm90aGVyIHRvIHJlY292ZXIuICBCdXQgbWF5YmUgdGhhdCdzIG15IGxp
bWl0ZWQgaW1hZ2luYXRpb24uICBVbmRlciB3aGF0IHVzZSBjYXNlcyBkbyB5b3UgdGhpbmsgQ0xJ
RF9JTlVTRSBtaWdodCBvY2N1ciBhbmQgaXQgbWlnaHQgYmUgdXNlZnVsIHRvIGF0dGVtcHQgcmVj
b3Zlcnk/DQo+ID4+IA0KPiA+IA0KPiA+IFRoZSBzZXJ2ZXIgY2FjaGVzIHRoZSBwcmluY2lwYWwg
bmFtZSB0aGF0IHdhcyB1c2VkIHRvIGNhbGwgU0VUQ0xJRU5USUQNCj4gPiB3aGVuIHRoZSBsZWFz
ZSB3YXMgZXN0YWJsaXNoZWQuIEFueSBhdHRlbXB0IHRvIGNhbGwgU0VUQ0xJRU5USUQgd2l0aCBh
DQo+ID4gZGlmZmVyZW50IHByaW5jaXBhbCB3aWxsIHJlc3VsdCBpbiBDTElEX0lOVVNFIHVubGVz
cyB0aGUgbGVhc2UgaGFzDQo+ID4gZXhwaXJlZC4NCj4gPiANCj4gPiBTbyB3aGF0IEkgd2FzIHBy
b3Bvc2luZyB3YXNuJ3QgdGhhdCB5b3UgdHJ5IHRvIGNoYW5nZSB0aGUgYXV0aGVudGljYXRpb24N
Cj4gPiBmbGF2b3VyIG9uIGFuIGV4aXN0aW5nIG5mc19jbGllbnQuIEl0IHdhcyB0aGF0IHdoZW4g
eW91IGFyZSBwcm9iaW5nLCB5b3UNCj4gPiBjYW4gdXNlIHRoZSBDTElEX0lOVVNFIHJlcGx5IGZy
b20gU0VUQ0xJRU5USUQgYXMgYSBkaXJlY3QgaW5kaWNhdGlvbg0KPiA+IHRoYXQgdGhlIHNlcnZl
ciBpcyBpbmRlZWQgdHJ1bmtlZCwgYW5kIHRoYXQgeW91IGFscmVhZHkgaG9sZCBhIGxlYXNlIG9u
DQo+ID4gdGhhdCBzZXJ2ZXIsIGJ1dCB0aGF0IHRoZSBhdXRoZW50aWNhdGlvbiBmbGF2b3VyIHRo
YXQgeW91IGFyZSB0cnlpbmcgdG8NCj4gPiB1c2UgaXMgd3JvbmcuDQo+IA0KPiBUaGUgdXNlIGNh
c2Ugd291bGQgYmUgdGhhdCBteSBjbGllbnQgaGFzIG1vdW50ZWQgYSBzZXJ2ZXIgdmlhIGFkZHJl
c3MgWCB1c2luZyBhdXRoZW50aWNhdGlvbiBmbGF2b3IgMSwgYW5kIHRoZW4gdHJpZXMgdG8gbW91
bnQgdGhlIHNhbWUgc2VydmVyIHZpYSBhZGRyZXNzIFkgdXNpbmcgYXV0aGVudGljYXRpb24gZmxh
dm9yIDIuDQoNCi4uLmZvciB3aGljaCB0aGUgcmVzdWx0IHNob3VsZCBiZSB0aGF0IGFsbCBzZXRj
bGllbnRpZC9jb25maXJtIGFuZCByZW5ldw0KcmVxdWVzdHMgd2lsbCB1c2UgZmxhdm91ciAxLg0K
DQo+IERvIHdlIGV2ZW4gbmVlZCB0byByZXRyeSB0aGUgU0VUQ0xJRU5USUQgYW5kIHRvIHBlcmZv
cm0gYSBTRVRDTElFTlRJRF9DT05GSVJNIGluIHRoYXQgY2FzZT8NCg0KWWVzLiBPdGhlcndpc2Ug
d2UgZW5kIHVwIHdpdGggMiBsZWFzZXMgb24gdGhlIHNhbWUgc2VydmVyLiBXZSBkb24ndCB3YW50
DQp0byBkbyB0aGF0Li4uDQoNCj4gTm93LCB3aGF0IGFib3V0IG5mczRfcmVjbGFpbV9sZWFzZSgp
ID8gIElmIHRoZSBjbGllbnQgc2VlcyBDTElEX0lOVVNFIGR1cmluZyBhIGxlYXNlIHJlY2xhaW0s
IG5vIHRydW5raW5nIGRpc2NvdmVyeSBpcyBpbnZvbHZlZC4NCg0KVGhhdCB3b3VsZCBtZWFuIHRo
YXQgdGhlIGxlYXNlIHdhcyBleHBpcmVkLCBhbmQgdGhhdCBzb21lb25lIHNlbnQgYQ0KU0VUQ0xJ
RU5USUQgY2FsbCB0byB0aGUgc2VydmVyIHVzaW5nIG91ciBjbGllbnRpZCBzdHJpbmcsIGJ1dCB1
c2luZyB0aGUNCndyb25nIHByaW5jaXBhbC4gVGhlcmUgYXJlIDIgY2FzZXM6DQoNCjEpIFNvbWVv
bmUgaXMgc3Bvb2Zpbmcgb3VyIGNsaWVudC4gSSd2ZSBubyBpZGVhIGhvdyB0byByZWNvdmVyIGZy
b20NCnRoaXMsIHNob3J0IG9mIGNoYW5naW5nIHRoZSBjbGllbnRpZCBzdHJpbmcuDQoNCjIpIFRo
ZSBzZXJ2ZXIgaXMgdHJ1bmtlZCwgdGhlIGxlYXNlIGV4cGlyZWQsIGFuZCB3ZSBoYXBwZW5lZCB0
byBjYWxsDQonbW91bnQnIHdoaWxlIGl0IHdhcyBleHBpcmVkLCBhbmQgaW5hZHZlcnRlbnRseSBz
ZW50IGEgU0VUQ0xJRU5USUQNCitTRVRDTElFTlRJRF9DT05GSVJNIGNhbGwgdG8gdGhlIHNlcnZl
ciB1c2luZyBhIGRpZmZlcmVudCBJUCBhZGRyZXNzLA0KYW5kIHVzaW5nIHRoZSB3cm9uZyBwcmlu
Y2lwYWwuDQoNCg0KLS0gDQpUcm9uZCBNeWtsZWJ1c3QNCkxpbnV4IE5GUyBjbGllbnQgbWFpbnRh
aW5lcg0KDQpOZXRBcHANClRyb25kLk15a2xlYnVzdEBuZXRhcHAuY29tDQp3d3cubmV0YXBwLmNv
bQ0KDQo=

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE
  2012-04-26 19:14             ` Myklebust, Trond
@ 2012-04-26 19:46               ` Chuck Lever
  2012-04-26 19:57                 ` Myklebust, Trond
  0 siblings, 1 reply; 37+ messages in thread
From: Chuck Lever @ 2012-04-26 19:46 UTC (permalink / raw)
  To: Myklebust, Trond; +Cc: linux-nfs


On Apr 26, 2012, at 3:14 PM, Myklebust, Trond wrote:

> On Thu, 2012-04-26 at 15:04 -0400, Chuck Lever wrote:
>> On Apr 26, 2012, at 2:53 PM, Myklebust, Trond wrote:
>> 
>>> On Thu, 2012-04-26 at 14:43 -0400, Chuck Lever wrote:
>>>> On Apr 26, 2012, at 12:55 PM, Myklebust, Trond wrote:
>>>> 
>>>>> On Thu, 2012-04-26 at 12:24 -0400, Chuck Lever wrote:
>>>>>> On Apr 23, 2012, at 4:55 PM, Chuck Lever wrote:
>>>>> Then lets move the flavour out of the clientid string,
>>>> 
>>>> Removing the flavor from the nfs_client_id4 string makes sense.
>>>> 
>>>>> and just settle
>>>>> for handling CLID_INUSE by changing the flavour on the SETCLIENTID call.
>>>> 
>>>> This is where I get hazy.  
>>>> 
>>>> If I simply change the authentication flavor on the existing clp->cl_rpcclient, will this affect ongoing RENEW operations that also use this transport?  Do we want subsequent RENEW operations to use the new flavor?
>>>> 
>>>> Thinking hypothetically, it seems to me that CLID_INUSE is really an indication of a permanent configuration error, or a software bug, and we should not bother to recover.  But maybe that's my limited imagination.  Under what use cases do you think CLID_INUSE might occur and it might be useful to attempt recovery?
>>>> 
>>> 
>>> The server caches the principal name that was used to call SETCLIENTID
>>> when the lease was established. Any attempt to call SETCLIENTID with a
>>> different principal will result in CLID_INUSE unless the lease has
>>> expired.
>>> 
>>> So what I was proposing wasn't that you try to change the authentication
>>> flavour on an existing nfs_client. It was that when you are probing, you
>>> can use the CLID_INUSE reply from SETCLIENTID as a direct indication
>>> that the server is indeed trunked, and that you already hold a lease on
>>> that server, but that the authentication flavour that you are trying to
>>> use is wrong.
>> 
>> The use case would be that my client has mounted a server via address X using authentication flavor 1, and then tries to mount the same server via address Y using authentication flavor 2.
> 
> ...for which the result should be that all setclientid/confirm and renew
> requests will use flavour 1.

Agreed.

>> Do we even need to retry the SETCLIENTID and to perform a SETCLIENTID_CONFIRM in that case?
> 
> Yes. Otherwise we end up with 2 leases on the same server.

I don't see how...  If the second SETCLIENTID fails with CLID_INUSE then the server still has the first lease that's using flavor 1.  "Boom, done."

>> Now, what about nfs4_reclaim_lease() ?  If the client sees CLID_INUSE during a lease reclaim, no trunking discovery is involved.
> 
> That would mean that the lease was expired, and that someone sent a
> SETCLIENTID call to the server using our clientid string, but using the
> wrong principal. There are 2 cases:
> 
> 1) Someone is spoofing our client. I've no idea how to recover from
> this, short of changing the clientid string.

Maybe we should keep cl_uniquifier for case 1...?  Since nfs4_reclaim_lease() is called in the state manager, it has to do something to recover or make the waiting process error out.

> 2) The server is trunked, the lease expired, and we happened to call
> 'mount' while it was expired, and inadvertently sent a SETCLIENTID
> +SETCLIENTID_CONFIRM call to the server using a different IP address,
> and using the wrong principal.

The clid_init_mutex should exclude this case...?

-- 
Chuck Lever
chuck[dot]lever[at]oracle[dot]com





^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE
  2012-04-26 19:46               ` Chuck Lever
@ 2012-04-26 19:57                 ` Myklebust, Trond
  0 siblings, 0 replies; 37+ messages in thread
From: Myklebust, Trond @ 2012-04-26 19:57 UTC (permalink / raw)
  To: Chuck Lever; +Cc: linux-nfs

T24gVGh1LCAyMDEyLTA0LTI2IGF0IDE1OjQ2IC0wNDAwLCBDaHVjayBMZXZlciB3cm90ZToNCj4g
T24gQXByIDI2LCAyMDEyLCBhdCAzOjE0IFBNLCBNeWtsZWJ1c3QsIFRyb25kIHdyb3RlOg0KPiAN
Cj4gPiBPbiBUaHUsIDIwMTItMDQtMjYgYXQgMTU6MDQgLTA0MDAsIENodWNrIExldmVyIHdyb3Rl
Og0KPiA+PiBPbiBBcHIgMjYsIDIwMTIsIGF0IDI6NTMgUE0sIE15a2xlYnVzdCwgVHJvbmQgd3Jv
dGU6DQo+ID4+IA0KPiA+Pj4gT24gVGh1LCAyMDEyLTA0LTI2IGF0IDE0OjQzIC0wNDAwLCBDaHVj
ayBMZXZlciB3cm90ZToNCj4gPj4+PiBPbiBBcHIgMjYsIDIwMTIsIGF0IDEyOjU1IFBNLCBNeWts
ZWJ1c3QsIFRyb25kIHdyb3RlOg0KPiA+Pj4+IA0KPiA+Pj4+PiBPbiBUaHUsIDIwMTItMDQtMjYg
YXQgMTI6MjQgLTA0MDAsIENodWNrIExldmVyIHdyb3RlOg0KPiA+Pj4+Pj4gT24gQXByIDIzLCAy
MDEyLCBhdCA0OjU1IFBNLCBDaHVjayBMZXZlciB3cm90ZToNCj4gPj4+Pj4gVGhlbiBsZXRzIG1v
dmUgdGhlIGZsYXZvdXIgb3V0IG9mIHRoZSBjbGllbnRpZCBzdHJpbmcsDQo+ID4+Pj4gDQo+ID4+
Pj4gUmVtb3ZpbmcgdGhlIGZsYXZvciBmcm9tIHRoZSBuZnNfY2xpZW50X2lkNCBzdHJpbmcgbWFr
ZXMgc2Vuc2UuDQo+ID4+Pj4gDQo+ID4+Pj4+IGFuZCBqdXN0IHNldHRsZQ0KPiA+Pj4+PiBmb3Ig
aGFuZGxpbmcgQ0xJRF9JTlVTRSBieSBjaGFuZ2luZyB0aGUgZmxhdm91ciBvbiB0aGUgU0VUQ0xJ
RU5USUQgY2FsbC4NCj4gPj4+PiANCj4gPj4+PiBUaGlzIGlzIHdoZXJlIEkgZ2V0IGhhenkuICAN
Cj4gPj4+PiANCj4gPj4+PiBJZiBJIHNpbXBseSBjaGFuZ2UgdGhlIGF1dGhlbnRpY2F0aW9uIGZs
YXZvciBvbiB0aGUgZXhpc3RpbmcgY2xwLT5jbF9ycGNjbGllbnQsIHdpbGwgdGhpcyBhZmZlY3Qg
b25nb2luZyBSRU5FVyBvcGVyYXRpb25zIHRoYXQgYWxzbyB1c2UgdGhpcyB0cmFuc3BvcnQ/ICBE
byB3ZSB3YW50IHN1YnNlcXVlbnQgUkVORVcgb3BlcmF0aW9ucyB0byB1c2UgdGhlIG5ldyBmbGF2
b3I/DQo+ID4+Pj4gDQo+ID4+Pj4gVGhpbmtpbmcgaHlwb3RoZXRpY2FsbHksIGl0IHNlZW1zIHRv
IG1lIHRoYXQgQ0xJRF9JTlVTRSBpcyByZWFsbHkgYW4gaW5kaWNhdGlvbiBvZiBhIHBlcm1hbmVu
dCBjb25maWd1cmF0aW9uIGVycm9yLCBvciBhIHNvZnR3YXJlIGJ1ZywgYW5kIHdlIHNob3VsZCBu
b3QgYm90aGVyIHRvIHJlY292ZXIuICBCdXQgbWF5YmUgdGhhdCdzIG15IGxpbWl0ZWQgaW1hZ2lu
YXRpb24uICBVbmRlciB3aGF0IHVzZSBjYXNlcyBkbyB5b3UgdGhpbmsgQ0xJRF9JTlVTRSBtaWdo
dCBvY2N1ciBhbmQgaXQgbWlnaHQgYmUgdXNlZnVsIHRvIGF0dGVtcHQgcmVjb3Zlcnk/DQo+ID4+
Pj4gDQo+ID4+PiANCj4gPj4+IFRoZSBzZXJ2ZXIgY2FjaGVzIHRoZSBwcmluY2lwYWwgbmFtZSB0
aGF0IHdhcyB1c2VkIHRvIGNhbGwgU0VUQ0xJRU5USUQNCj4gPj4+IHdoZW4gdGhlIGxlYXNlIHdh
cyBlc3RhYmxpc2hlZC4gQW55IGF0dGVtcHQgdG8gY2FsbCBTRVRDTElFTlRJRCB3aXRoIGENCj4g
Pj4+IGRpZmZlcmVudCBwcmluY2lwYWwgd2lsbCByZXN1bHQgaW4gQ0xJRF9JTlVTRSB1bmxlc3Mg
dGhlIGxlYXNlIGhhcw0KPiA+Pj4gZXhwaXJlZC4NCj4gPj4+IA0KPiA+Pj4gU28gd2hhdCBJIHdh
cyBwcm9wb3Npbmcgd2Fzbid0IHRoYXQgeW91IHRyeSB0byBjaGFuZ2UgdGhlIGF1dGhlbnRpY2F0
aW9uDQo+ID4+PiBmbGF2b3VyIG9uIGFuIGV4aXN0aW5nIG5mc19jbGllbnQuIEl0IHdhcyB0aGF0
IHdoZW4geW91IGFyZSBwcm9iaW5nLCB5b3UNCj4gPj4+IGNhbiB1c2UgdGhlIENMSURfSU5VU0Ug
cmVwbHkgZnJvbSBTRVRDTElFTlRJRCBhcyBhIGRpcmVjdCBpbmRpY2F0aW9uDQo+ID4+PiB0aGF0
IHRoZSBzZXJ2ZXIgaXMgaW5kZWVkIHRydW5rZWQsIGFuZCB0aGF0IHlvdSBhbHJlYWR5IGhvbGQg
YSBsZWFzZSBvbg0KPiA+Pj4gdGhhdCBzZXJ2ZXIsIGJ1dCB0aGF0IHRoZSBhdXRoZW50aWNhdGlv
biBmbGF2b3VyIHRoYXQgeW91IGFyZSB0cnlpbmcgdG8NCj4gPj4+IHVzZSBpcyB3cm9uZy4NCj4g
Pj4gDQo+ID4+IFRoZSB1c2UgY2FzZSB3b3VsZCBiZSB0aGF0IG15IGNsaWVudCBoYXMgbW91bnRl
ZCBhIHNlcnZlciB2aWEgYWRkcmVzcyBYIHVzaW5nIGF1dGhlbnRpY2F0aW9uIGZsYXZvciAxLCBh
bmQgdGhlbiB0cmllcyB0byBtb3VudCB0aGUgc2FtZSBzZXJ2ZXIgdmlhIGFkZHJlc3MgWSB1c2lu
ZyBhdXRoZW50aWNhdGlvbiBmbGF2b3IgMi4NCj4gPiANCj4gPiAuLi5mb3Igd2hpY2ggdGhlIHJl
c3VsdCBzaG91bGQgYmUgdGhhdCBhbGwgc2V0Y2xpZW50aWQvY29uZmlybSBhbmQgcmVuZXcNCj4g
PiByZXF1ZXN0cyB3aWxsIHVzZSBmbGF2b3VyIDEuDQo+IA0KPiBBZ3JlZWQuDQo+IA0KPiA+PiBE
byB3ZSBldmVuIG5lZWQgdG8gcmV0cnkgdGhlIFNFVENMSUVOVElEIGFuZCB0byBwZXJmb3JtIGEg
U0VUQ0xJRU5USURfQ09ORklSTSBpbiB0aGF0IGNhc2U/DQo+ID4gDQo+ID4gWWVzLiBPdGhlcndp
c2Ugd2UgZW5kIHVwIHdpdGggMiBsZWFzZXMgb24gdGhlIHNhbWUgc2VydmVyLg0KPiANCj4gSSBk
b24ndCBzZWUgaG93Li4uICBJZiB0aGUgc2Vjb25kIFNFVENMSUVOVElEIGZhaWxzIHdpdGggQ0xJ
RF9JTlVTRSB0aGVuIHRoZSBzZXJ2ZXIgc3RpbGwgaGFzIHRoZSBmaXJzdCBsZWFzZSB0aGF0J3Mg
dXNpbmcgZmxhdm9yIDEuICAiQm9vbSwgZG9uZS4iDQoNClNvcnJ5LiBJIHRob3VnaHQgeW91IHdl
cmUgaW1wbHlpbmcgdGhhdCB3ZSBzaG91bGQgdXNlIGEgZGlmZmVyZW50DQpjbGllbnRpZCBvciBz
b21ldGhpbmcgbGlrZSB0aGF0Lg0KDQpZb3Ugc3RpbGwgZG8gbmVlZCB0byBpc3N1ZSB0aGUgU0VU
Q0xJRU5USUQgaW4gb3JkZXIgdG8gZmlndXJlIG91dCB0aGUNCnRydW5raW5nIHRvcG9sb2d5IHNv
IHRoYXQgeW91IGNhbiBtYXAgYWRkcmVzcyBZIHRvIGFkZHJlc3MgWC4NCg0KPiA+PiBOb3csIHdo
YXQgYWJvdXQgbmZzNF9yZWNsYWltX2xlYXNlKCkgPyAgSWYgdGhlIGNsaWVudCBzZWVzIENMSURf
SU5VU0UgZHVyaW5nIGEgbGVhc2UgcmVjbGFpbSwgbm8gdHJ1bmtpbmcgZGlzY292ZXJ5IGlzIGlu
dm9sdmVkLg0KPiA+IA0KPiA+IFRoYXQgd291bGQgbWVhbiB0aGF0IHRoZSBsZWFzZSB3YXMgZXhw
aXJlZCwgYW5kIHRoYXQgc29tZW9uZSBzZW50IGENCj4gPiBTRVRDTElFTlRJRCBjYWxsIHRvIHRo
ZSBzZXJ2ZXIgdXNpbmcgb3VyIGNsaWVudGlkIHN0cmluZywgYnV0IHVzaW5nIHRoZQ0KPiA+IHdy
b25nIHByaW5jaXBhbC4gVGhlcmUgYXJlIDIgY2FzZXM6DQo+ID4gDQo+ID4gMSkgU29tZW9uZSBp
cyBzcG9vZmluZyBvdXIgY2xpZW50LiBJJ3ZlIG5vIGlkZWEgaG93IHRvIHJlY292ZXIgZnJvbQ0K
PiA+IHRoaXMsIHNob3J0IG9mIGNoYW5naW5nIHRoZSBjbGllbnRpZCBzdHJpbmcuDQo+IA0KPiBN
YXliZSB3ZSBzaG91bGQga2VlcCBjbF91bmlxdWlmaWVyIGZvciBjYXNlIDEuLi4/ICBTaW5jZSBu
ZnM0X3JlY2xhaW1fbGVhc2UoKSBpcyBjYWxsZWQgaW4gdGhlIHN0YXRlIG1hbmFnZXIsIGl0IGhh
cyB0byBkbyBzb21ldGhpbmcgdG8gcmVjb3ZlciBvciBtYWtlIHRoZSB3YWl0aW5nIHByb2Nlc3Mg
ZXJyb3Igb3V0Lg0KDQpZZXMsIGJ1dCB0aGF0IGJyZWFrcyB0aGUgVUNTIHRydW5raW5nLWRldGVj
dGlvbiBtb2RlbC4gU3Bvb2ZpbmcgaXMgYmFkDQpubyBtYXR0ZXIgd2hhdCBoYXBwZW5zLCBhbmQg
cGFwZXJpbmcgYXJvdW5kIGl0IHdpdGggY2xfdW5pcXVpZmllciB3YXMNCndyb25nLiBXaGF0IGlm
IHlvdSBqdXN0IGhhcHBlbmVkIHRvIHVzZSB0aGUgY29ycmVjdCBwcmluY2lwYWwgKHZlcnkgZWFz
eQ0KaWYgeW91IGFyZSB1c2luZyBBVVRIX1NZUykgYW5kIGRpZG4ndCBkZXRlY3QgdGhhdCB0aGUg
Y2xpZW50aWQgaXMgYmVpbmcNCnNwb29mZWQ/DQoNCj4gPiAyKSBUaGUgc2VydmVyIGlzIHRydW5r
ZWQsIHRoZSBsZWFzZSBleHBpcmVkLCBhbmQgd2UgaGFwcGVuZWQgdG8gY2FsbA0KPiA+ICdtb3Vu
dCcgd2hpbGUgaXQgd2FzIGV4cGlyZWQsIGFuZCBpbmFkdmVydGVudGx5IHNlbnQgYSBTRVRDTElF
TlRJRA0KPiA+ICtTRVRDTElFTlRJRF9DT05GSVJNIGNhbGwgdG8gdGhlIHNlcnZlciB1c2luZyBh
IGRpZmZlcmVudCBJUCBhZGRyZXNzLA0KPiA+IGFuZCB1c2luZyB0aGUgd3JvbmcgcHJpbmNpcGFs
Lg0KPiANCj4gVGhlIGNsaWRfaW5pdF9tdXRleCBzaG91bGQgZXhjbHVkZSB0aGlzIGNhc2UuLi4/
DQoNCkkgYXNzdW1lIHNvLi4uDQoNCi0tIA0KVHJvbmQgTXlrbGVidXN0DQpMaW51eCBORlMgY2xp
ZW50IG1haW50YWluZXINCg0KTmV0QXBwDQpUcm9uZC5NeWtsZWJ1c3RAbmV0YXBwLmNvbQ0Kd3d3
Lm5ldGFwcC5jb20NCg0K

^ permalink raw reply	[flat|nested] 37+ messages in thread

end of thread, other threads:[~2012-04-26 19:57 UTC | newest]

Thread overview: 37+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-04-23 20:53 [PATCH 01/20] NFS: Fix comment misspelling in struct nfs_client definition Chuck Lever
2012-04-23 20:53 ` [PATCH 02/20] NFS: Use proper naming conventions for NFSv4.1 server scope fields Chuck Lever
2012-04-23 20:53 ` [PATCH 03/20] NFS: Use proper naming conventions for nfs_client.impl_id field Chuck Lever
2012-04-23 20:53 ` [PATCH 04/20] NFS: Use proper naming conventions for the nfs_client.net field Chuck Lever
2012-04-23 20:53 ` [PATCH 05/20] NFS: Clean up return code checking in nfs4_proc_exchange_id() Chuck Lever
2012-04-23 21:07   ` Myklebust, Trond
2012-04-23 20:54 ` [PATCH 06/20] NFS: Remove nfs_unique_id Chuck Lever
2012-04-23 20:54 ` [PATCH 07/20] NFS: Don't swap bytes in nfs4_construct_boot_verifier() Chuck Lever
2012-04-23 20:54 ` [PATCH 08/20] NFS: Fix NFSv4 BAD_SEQID recovery Chuck Lever
2012-04-23 20:54 ` [PATCH 09/20] NFS: Force server to drop NFSv4 state Chuck Lever
2012-04-23 21:13   ` Myklebust, Trond
2012-04-23 21:18     ` Chuck Lever
2012-04-23 20:54 ` [PATCH 10/20] NFS: Always use the same SETCLIENTID boot verifier Chuck Lever
2012-04-23 20:54 ` [PATCH 11/20] NFS: Refactor nfs_get_client(): add nfs_found_client() Chuck Lever
2012-04-23 20:54 ` [PATCH 12/20] NFS: Refactor nfs_get_client(): initialize nfs_client Chuck Lever
2012-04-23 20:55 ` [PATCH 13/20] NFS: Fix recovery from NFS4ERR_CLID_INUSE Chuck Lever
2012-04-26 16:24   ` Chuck Lever
2012-04-26 16:55     ` Myklebust, Trond
2012-04-26 18:43       ` Chuck Lever
2012-04-26 18:53         ` Myklebust, Trond
2012-04-26 18:57           ` Myklebust, Trond
2012-04-26 19:04           ` Chuck Lever
2012-04-26 19:14             ` Myklebust, Trond
2012-04-26 19:46               ` Chuck Lever
2012-04-26 19:57                 ` Myklebust, Trond
2012-04-23 20:55 ` [PATCH 14/20] NFS: Add nfs_client behavior flags Chuck Lever
2012-04-23 20:55 ` [PATCH 15/20] NFS: Introduce "migration" mount option Chuck Lever
2012-04-23 20:55 ` [PATCH 16/20] NFS: Use the same nfs_client_id4 for every server Chuck Lever
2012-04-23 20:55 ` [PATCH 17/20] NFS: EXCHANGE_ID should save the server major and minor ID Chuck Lever
2012-04-23 20:55 ` [PATCH 18/20] NFS: Detect NFSv4 server trunking when mounting Chuck Lever
2012-04-23 21:27   ` Myklebust, Trond
2012-04-23 21:43     ` Chuck Lever
2012-04-23 21:47     ` Chuck Lever
2012-04-23 21:56       ` Myklebust, Trond
2012-04-23 20:56 ` [PATCH 19/20] NFS: Add nfs4_unique_id boot parameter Chuck Lever
2012-04-23 20:56 ` [PATCH 20/20] NFS: Clean up debugging messages in fs/nfs/client.c Chuck Lever
2012-04-23 21:23   ` Malahal Naineni

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.