From: Sasha Levin <sashal@kernel.org>
To: linux-kernel@vger.kernel.org, stable@vger.kernel.org
Cc: Chuck Lever <chuck.lever@oracle.com>,
	Bill Baker <bill.baker@oracle.com>, Simo Sorce <simo@redhat.com>,
	"J . Bruce Fields" <bfields@redhat.com>,
	Sasha Levin <sashal@kernel.org>,
	linux-nfs@vger.kernel.org, netdev@vger.kernel.org
Subject: [PATCH AUTOSEL 5.4 083/205] SUNRPC: Fix svcauth_gss_proxy_init()
Date: Thu, 16 Jan 2020 11:40:58 -0500
Message-ID: <20200116164300.6705-83-sashal@kernel.org>
In-Reply-To: <20200116164300.6705-1-sashal@kernel.org>

From: Chuck Lever <chuck.lever@oracle.com>

[ Upstream commit 5866efa8cbfbadf3905072798e96652faf02dbe8 ]

gss_read_proxy_verf() assumes things about the XDR buffer containing
the RPC Call that are not true for buffers generated by
svc_rdma_recv().

RDMA's buffers look more like what the upper layer generates for
sending: head is a kmalloc'd buffer; it does not point to a page
whose contents are contiguous with the first page in the buffer's
page array. The result is that ACCEPT_SEC_CONTEXT via RPC/RDMA has
stopped working on Linux NFS servers that use gssproxy.
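
To illustrate (a reader's sketch, not part of the change itself): the
old code assumed the TCP receive layout, where the GSS token lies
linearly across rqstp->rq_pages[] starting at rq_arg.head[0].iov_base,
and therefore handed those pages straight to the gssproxy upcall:

	/* Old assumption (sketch of the code removed below): the token
	 * is contiguous across rq_pages[], so the receive pages can be
	 * passed to gssproxy as-is.
	 */
	in_token->pages = rqstp->rq_pages;
	in_token->page_base = (ulong)argv->iov_base & ~PAGE_MASK;
	in_token->page_len = inlen;

With svc_rdma_recv(), argv->iov_base is a separately kmalloc'd buffer
that is not backed by rq_pages[], so gssproxy would be handed pages
that do not contain the token. The patch below copies the token into
freshly allocated pages instead.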

This does not affect clients that use only TCP to send their
ACCEPT_SEC_CONTEXT operation (that's all Linux clients). Other
clients, like Solaris NFS clients, send ACCEPT_SEC_CONTEXT on the
same transport as they send all other NFS operations. Such clients
can send ACCEPT_SEC_CONTEXT via RPC/RDMA.

I thought I had found every direct reference in the server RPC code
to the rqstp->rq_pages field.

Bug found at the 2019 Westford NFS bake-a-thon.

Fixes: 3316f0631139 ("svcrdma: Persistently allocate and DMA- ... ")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Bill Baker <bill.baker@oracle.com>
Reviewed-by: Simo Sorce <simo@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/sunrpc/auth_gss/svcauth_gss.c | 84 +++++++++++++++++++++++--------
 1 file changed, 63 insertions(+), 21 deletions(-)

diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 8be2f209982b..908b60a72d95 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1075,24 +1075,32 @@ gss_read_verf(struct rpc_gss_wire_cred *gc,
 	return 0;
 }
 
-/* Ok this is really heavily depending on a set of semantics in
- * how rqstp is set up by svc_recv and pages laid down by the
- * server when reading a request. We are basically guaranteed that
- * the token lays all down linearly across a set of pages, starting
- * at iov_base in rq_arg.head[0] which happens to be the first of a
- * set of pages stored in rq_pages[].
- * rq_arg.head[0].iov_base will provide us the page_base to pass
- * to the upcall.
- */
-static inline int
-gss_read_proxy_verf(struct svc_rqst *rqstp,
-		    struct rpc_gss_wire_cred *gc, __be32 *authp,
-		    struct xdr_netobj *in_handle,
-		    struct gssp_in_token *in_token)
+static void gss_free_in_token_pages(struct gssp_in_token *in_token)
 {
-	struct kvec *argv = &rqstp->rq_arg.head[0];
 	u32 inlen;
-	int res;
+	int i;
+
+	i = 0;
+	inlen = in_token->page_len;
+	while (inlen) {
+		if (in_token->pages[i])
+			put_page(in_token->pages[i]);
+		inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
+	}
+
+	kfree(in_token->pages);
+	in_token->pages = NULL;
+}
+
+static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+			       struct rpc_gss_wire_cred *gc, __be32 *authp,
+			       struct xdr_netobj *in_handle,
+			       struct gssp_in_token *in_token)
+{
+	struct kvec *argv = &rqstp->rq_arg.head[0];
+	unsigned int page_base, length;
+	int pages, i, res;
+	size_t inlen;
 
 	res = gss_read_common_verf(gc, argv, authp, in_handle);
 	if (res)
@@ -1102,10 +1110,36 @@ gss_read_proxy_verf(struct svc_rqst *rqstp,
 	if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
 		return SVC_DENIED;
 
-	in_token->pages = rqstp->rq_pages;
-	in_token->page_base = (ulong)argv->iov_base & ~PAGE_MASK;
+	pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
+	in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
+	if (!in_token->pages)
+		return SVC_DENIED;
+	in_token->page_base = 0;
 	in_token->page_len = inlen;
+	for (i = 0; i < pages; i++) {
+		in_token->pages[i] = alloc_page(GFP_KERNEL);
+		if (!in_token->pages[i]) {
+			gss_free_in_token_pages(in_token);
+			return SVC_DENIED;
+		}
+	}
 
+	length = min_t(unsigned int, inlen, argv->iov_len);
+	memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
+	inlen -= length;
+
+	i = 1;
+	page_base = rqstp->rq_arg.page_base;
+	while (inlen) {
+		length = min_t(unsigned int, inlen, PAGE_SIZE);
+		memcpy(page_address(in_token->pages[i]),
+		       page_address(rqstp->rq_arg.pages[i]) + page_base,
+		       length);
+
+		inlen -= length;
+		page_base = 0;
+		i++;
+	}
 	return 0;
 }
 
@@ -1280,8 +1314,11 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
 		break;
 	case GSS_S_COMPLETE:
 		status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
-		if (status)
+		if (status) {
+			pr_info("%s: gss_proxy_save_rsc failed (%d)\n",
+				__func__, status);
 			goto out;
+		}
 		cli_handle.data = (u8 *)&handle;
 		cli_handle.len = sizeof(handle);
 		break;
@@ -1292,15 +1329,20 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
 
 	/* Got an answer to the upcall; use it: */
 	if (gss_write_init_verf(sn->rsc_cache, rqstp,
-				&cli_handle, &ud.major_status))
+				&cli_handle, &ud.major_status)) {
+		pr_info("%s: gss_write_init_verf failed\n", __func__);
 		goto out;
+	}
 	if (gss_write_resv(resv, PAGE_SIZE,
 			   &cli_handle, &ud.out_token,
-			   ud.major_status, ud.minor_status))
+			   ud.major_status, ud.minor_status)) {
+		pr_info("%s: gss_write_resv failed\n", __func__);
 		goto out;
+	}
 
 	ret = SVC_COMPLETE;
 out:
+	gss_free_in_token_pages(&ud.in_token);
 	gssp_free_upcall_data(&ud);
 	return ret;
 }
-- 
2.20.1

