From: Chuck Lever <chuck.lever@oracle.com>
To: Chuck Lever <chuck.lever@oracle.com>
Cc: Bruce Fields <bfields@fieldses.org>,
	Linux NFS Mailing List <linux-nfs@vger.kernel.org>,
	Simo Sorce <simo@redhat.com>
Subject: Re: [PATCH v1 2/2] SUNRPC: Fix svcauth_gss_proxy_init()
Date: Thu, 24 Oct 2019 09:35:58 -0400
Message-ID: <1DD83F8D-10A7-4273-A53F-3AB858EBE2D1@oracle.com>
In-Reply-To: <20191024133416.2148.96218.stgit@klimt.1015granger.net>

Whoops, was going to Cc: Simo on this one...

> On Oct 24, 2019, at 9:34 AM, Chuck Lever <chuck.lever@oracle.com> wrote:
> 
> gss_read_proxy_verf() assumes things about the XDR buffer containing
> the RPC Call that are not true for buffers generated by
> svc_rdma_recv().
> 
> RDMA's buffers look more like what the upper layer generates for
> sending: head is a kmalloc'd buffer; it does not point to a page
> whose contents are contiguous with the first page in the buffer's
> page array. The result is that ACCEPT_SEC_CONTEXT via RPC/RDMA has
> stopped working on Linux NFS servers that use gssproxy.
> 
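> To illustrate the assumption that breaks, here is the code this
> patch removes: the upcall was handed the receive pages directly,
> with page_base derived from the address of the head kvec:
> 
> 	in_token->pages = rqstp->rq_pages;
> 	in_token->page_base = (ulong)argv->iov_base & ~PAGE_MASK;
> 
> That mask arithmetic is meaningful only when head[0].iov_base
> points into rq_pages[0]; for svc_rdma_recv(), it produces the
> offset of a kmalloc'd buffer within its own page, which has
> nothing to do with rq_pages[].
> 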
> This does not affect clients that use only TCP to send their
> ACCEPT_SEC_CONTEXT operation (that's all Linux clients). Other
> clients, like Solaris NFS clients, send ACCEPT_SEC_CONTEXT on the
> same transport as they send all other NFS operations. Such clients
> can send ACCEPT_SEC_CONTEXT via RPC/RDMA.
> 
> I thought I had found every direct reference in the server RPC code
> to the rqstp->rq_pages field, but this one slipped through.
> 
> Bug found at the 2019 Westford NFS bake-a-thon.
> 
> Fixes: 3316f0631139 ("svcrdma: Persistently allocate and DMA- ... ")
> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> Tested-by: Bill Baker <bill.baker@oracle.com>
> ---
> net/sunrpc/auth_gss/svcauth_gss.c |   84 ++++++++++++++++++++++++++++---------
> 1 file changed, 63 insertions(+), 21 deletions(-)
> 
> diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
> index f130990..c62d1f1 100644
> --- a/net/sunrpc/auth_gss/svcauth_gss.c
> +++ b/net/sunrpc/auth_gss/svcauth_gss.c
> @@ -1078,24 +1078,32 @@ struct gss_svc_data {
> 	return 0;
> }
> 
> -/* Ok this is really heavily depending on a set of semantics in
> - * how rqstp is set up by svc_recv and pages laid down by the
> - * server when reading a request. We are basically guaranteed that
> - * the token lays all down linearly across a set of pages, starting
> - * at iov_base in rq_arg.head[0] which happens to be the first of a
> - * set of pages stored in rq_pages[].
> - * rq_arg.head[0].iov_base will provide us the page_base to pass
> - * to the upcall.
> - */
> -static inline int
> -gss_read_proxy_verf(struct svc_rqst *rqstp,
> -		    struct rpc_gss_wire_cred *gc, __be32 *authp,
> -		    struct xdr_netobj *in_handle,
> -		    struct gssp_in_token *in_token)
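> +/*
> + * Release the token pages allocated by gss_read_proxy_verf(),
> + * then release the page array itself.
> + */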
> +static void gss_free_in_token_pages(struct gssp_in_token *in_token)
> {
> -	struct kvec *argv = &rqstp->rq_arg.head[0];
> 	u32 inlen;
> -	int res;
> +	int i;
> +
> +	i = 0;
> +	inlen = in_token->page_len;
> +	while (inlen) {
> +		if (in_token->pages[i])
> +			put_page(in_token->pages[i]);
> +		inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
> +		i++;
> +	}
> +
> +	kfree(in_token->pages);
> +	in_token->pages = NULL;
> +}
> +
> +static int gss_read_proxy_verf(struct svc_rqst *rqstp,
> +			       struct rpc_gss_wire_cred *gc, __be32 *authp,
> +			       struct xdr_netobj *in_handle,
> +			       struct gssp_in_token *in_token)
> +{
> +	struct kvec *argv = &rqstp->rq_arg.head[0];
> +	unsigned int page_base, length;
> +	int pages, i, res;
> +	size_t inlen;
> 
> 	res = gss_read_common_verf(gc, argv, authp, in_handle);
> 	if (res)
> @@ -1105,10 +1113,36 @@ struct gss_svc_data {
> 	if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
> 		return SVC_DENIED;
> 
> -	in_token->pages = rqstp->rq_pages;
> -	in_token->page_base = (ulong)argv->iov_base & ~PAGE_MASK;
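> +	/*
> +	 * The layout of the receive buffer is transport-specific, so
> +	 * it cannot be handed to the upcall directly; copy the token
> +	 * into pages that this module allocates and owns.
> +	 */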
> +	pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
> +	in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
> +	if (!in_token->pages)
> +		return SVC_DENIED;
> +	in_token->page_base = 0;
> 	in_token->page_len = inlen;
> +	for (i = 0; i < pages; i++) {
> +		in_token->pages[i] = alloc_page(GFP_KERNEL);
> +		if (!in_token->pages[i]) {
> +			gss_free_in_token_pages(in_token);
> +			return SVC_DENIED;
> +		}
> +	}
> 
> +	length = min_t(unsigned int, inlen, argv->iov_len);
> +	memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
> +	inlen -= length;
> +
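> +	/* Destination page 0 already holds the bytes that arrived in
> +	 * the head kvec, so the remaining token bytes move from
> +	 * rq_arg page i - 1 into destination page i.
> +	 */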
> +	i = 1;
> +	page_base = rqstp->rq_arg.page_base;
> +	while (inlen) {
> +		length = min_t(unsigned int, inlen, PAGE_SIZE);
> +		memcpy(page_address(in_token->pages[i]),
> +		       page_address(rqstp->rq_arg.pages[i - 1]) + page_base,
> +		       length);
> +
> +		inlen -= length;
> +		page_base = 0;
> +		i++;
> +	}
> 	return 0;
> }
> 
> @@ -1282,8 +1316,11 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
> 		break;
> 	case GSS_S_COMPLETE:
> 		status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
> -		if (status)
> +		if (status) {
> +			pr_info("%s: gss_proxy_save_rsc failed (%d)\n",
> +				__func__, status);
> 			goto out;
> +		}
> 		cli_handle.data = (u8 *)&handle;
> 		cli_handle.len = sizeof(handle);
> 		break;
> @@ -1294,15 +1331,20 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
> 
> 	/* Got an answer to the upcall; use it: */
> 	if (gss_write_init_verf(sn->rsc_cache, rqstp,
> -				&cli_handle, &ud.major_status))
> +				&cli_handle, &ud.major_status)) {
> +		pr_info("%s: gss_write_init_verf failed\n", __func__);
> 		goto out;
> +	}
> 	if (gss_write_resv(resv, PAGE_SIZE,
> 			   &cli_handle, &ud.out_token,
> -			   ud.major_status, ud.minor_status))
> +			   ud.major_status, ud.minor_status)) {
> +		pr_info("%s: gss_write_resv failed\n", __func__);
> 		goto out;
> +	}
> 
> 	ret = SVC_COMPLETE;
> out:
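> +	/* The token pages are allocated by gss_read_proxy_verf();
> +	 * release them on both the success and error paths.
> +	 */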
> +	gss_free_in_token_pages(&ud.in_token);
> 	gssp_free_upcall_data(&ud);
> 	return ret;
> }
> 

--
Chuck Lever
