From: Chuck Lever <chuck.lever@oracle.com>
To: Anna Schumaker <Anna.Schumaker@netapp.com>
Cc: Linux NFS Mailing List <linux-nfs@vger.kernel.org>
Subject: Re: [PATCH v1 06/16] xprtrdma: Add a "deregister_external" op for each memreg mode
Date: Tue, 17 Mar 2015 08:04:33 -0700
Message-ID: <B6B5E091-91F7-48AB-B8B4-4232D28BFD17@oracle.com>
In-Reply-To: <55083C31.9020704@Netapp.com>


On Mar 17, 2015, at 7:37 AM, Anna Schumaker <Anna.Schumaker@netapp.com> wrote:

> On 03/13/2015 05:22 PM, Chuck Lever wrote:
>> There is very little common processing among the different external
>> memory deregistration functions.
>> 
>> In addition, instead of calling the deregistration function for each
>> segment, have one call release all segments for a request. This makes
>> the API a little asymmetrical, but a hair faster.
> 
> The common processing would be the for-each loop that you moved into the ro_unmap functions.  I'm not completely sold on this... how often do unmaps happen?

Once for every RPC.
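
To illustrate: the call site in xprt_rdma_free() goes from a per-segment
loop to a single per-RPC call (paraphrased from the transport.c hunk
below, not the exact upstream code):

	/* Before: the transport walks the segment array itself and
	 * invokes the generic deregistration helper once per chunk.
	 */
	for (i = 0; req->rl_nchunks;) {
		--req->rl_nchunks;
		i += rpcrdma_deregister_external(&req->rl_segments[i],
						 r_xprt);
	}

	/* After: one call per RPC; the memreg-specific ro_unmap
	 * method owns the per-segment loop.
	 */
	r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt, req, req->rl_nchunks);
	req->rl_nchunks = 0;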

> Anna
> 
>> 
>> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
>> ---
>> net/sunrpc/xprtrdma/fmr_ops.c      |   37 ++++++++++++++++
>> net/sunrpc/xprtrdma/frwr_ops.c     |   46 ++++++++++++++++++++
>> net/sunrpc/xprtrdma/physical_ops.c |   13 ++++++
>> net/sunrpc/xprtrdma/rpc_rdma.c     |    7 +--
>> net/sunrpc/xprtrdma/transport.c    |    8 +---
>> net/sunrpc/xprtrdma/verbs.c        |   81 ------------------------------------
>> net/sunrpc/xprtrdma/xprt_rdma.h    |    5 +-
>> 7 files changed, 103 insertions(+), 94 deletions(-)
>> 
>> diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
>> index 45fb646..9b983b4 100644
>> --- a/net/sunrpc/xprtrdma/fmr_ops.c
>> +++ b/net/sunrpc/xprtrdma/fmr_ops.c
>> @@ -20,6 +20,32 @@
>> /* Maximum scatter/gather per FMR */
>> #define RPCRDMA_MAX_FMR_SGES	(64)
>> 
>> +/* Use the ib_unmap_fmr() verb to prevent further remote
>> + * access via RDMA READ or RDMA WRITE.
>> + */
>> +static int
>> +__fmr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
>> +{
>> +	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
>> +	struct rpcrdma_mr_seg *seg1 = seg;
>> +	int rc, nsegs = seg->mr_nsegs;
>> +	LIST_HEAD(l);
>> +
>> +	list_add(&seg1->rl_mw->r.fmr->list, &l);
>> +	rc = ib_unmap_fmr(&l);
>> +	read_lock(&ia->ri_qplock);
>> +	while (seg1->mr_nsegs--)
>> +		rpcrdma_unmap_one(ia, seg++);
>> +	read_unlock(&ia->ri_qplock);
>> +	if (rc)
>> +		goto out_err;
>> +	return nsegs;
>> +
>> +out_err:
>> +	dprintk("RPC:       %s: ib_unmap_fmr status %i\n", __func__, rc);
>> +	return nsegs;
>> +}
>> +
>> /* FMR mode conveys up to 64 pages of payload per chunk segment.
>>  */
>> static size_t
>> @@ -79,8 +105,19 @@ out_maperr:
>> 	return rc;
>> }
>> 
>> +static void
>> +fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
>> +	     unsigned int count)
>> +{
>> +	unsigned int i;
>> +
>> +	for (i = 0; count--;)
>> +		i += __fmr_unmap(r_xprt, &req->rl_segments[i]);
>> +}
>> +
>> const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
>> 	.ro_map				= fmr_op_map,
>> +	.ro_unmap			= fmr_op_unmap,
>> 	.ro_maxpages			= fmr_op_maxpages,
>> 	.ro_displayname			= "fmr",
>> };
>> diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
>> index 2b5ccb0..05b5761 100644
>> --- a/net/sunrpc/xprtrdma/frwr_ops.c
>> +++ b/net/sunrpc/xprtrdma/frwr_ops.c
>> @@ -17,6 +17,41 @@
>> # define RPCDBG_FACILITY	RPCDBG_TRANS
>> #endif
>> 
>> +/* Post a LOCAL_INV Work Request to prevent further remote access
>> + * via RDMA READ or RDMA WRITE.
>> + */
>> +static int
>> +__frwr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
>> +{
>> +	struct rpcrdma_mr_seg *seg1 = seg;
>> +	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
>> +	struct ib_send_wr invalidate_wr, *bad_wr;
>> +	int rc, nsegs = seg->mr_nsegs;
>> +
>> +	seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
>> +
>> +	memset(&invalidate_wr, 0, sizeof(invalidate_wr));
>> +	invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
>> +	invalidate_wr.opcode = IB_WR_LOCAL_INV;
>> +	invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
>> +	DECR_CQCOUNT(&r_xprt->rx_ep);
>> +
>> +	read_lock(&ia->ri_qplock);
>> +	while (seg1->mr_nsegs--)
>> +		rpcrdma_unmap_one(ia, seg++);
>> +	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
>> +	read_unlock(&ia->ri_qplock);
>> +	if (rc)
>> +		goto out_err;
>> +	return nsegs;
>> +
>> +out_err:
>> +	/* Force rpcrdma_buffer_get() to retry */
>> +	seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
>> +	dprintk("RPC:       %s: ib_post_send status %i\n", __func__, rc);
>> +	return nsegs;
>> +}
>> +
>> /* FRWR mode conveys a list of pages per chunk segment. The
>>  * maximum length of that list is the FRWR page list depth.
>>  */
>> @@ -116,8 +151,19 @@ out_err:
>> 	return rc;
>> }
>> 
>> +static void
>> +frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
>> +	      unsigned int count)
>> +{
>> +	unsigned int i;
>> +
>> +	for (i = 0; count--;)
>> +		i += __frwr_unmap(r_xprt, &req->rl_segments[i]);
>> +}
>> +
>> const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
>> 	.ro_map				= frwr_op_map,
>> +	.ro_unmap			= frwr_op_unmap,
>> 	.ro_maxpages			= frwr_op_maxpages,
>> 	.ro_displayname			= "frwr",
>> };
>> diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
>> index 5a284ee..f2c15be 100644
>> --- a/net/sunrpc/xprtrdma/physical_ops.c
>> +++ b/net/sunrpc/xprtrdma/physical_ops.c
>> @@ -44,8 +44,21 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
>> 	return 1;
>> }
>> 
>> +/* Unmap a memory region, but leave it registered.
>> + */
>> +static void
>> +physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
>> +		  unsigned int count)
>> +{
>> +	unsigned int i;
>> +
>> +	for (i = 0; i < count; i++)
>> +		rpcrdma_unmap_one(&r_xprt->rx_ia, &req->rl_segments[i]);
>> +}
>> +
>> const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
>> 	.ro_map				= physical_op_map,
>> +	.ro_unmap			= physical_op_unmap,
>> 	.ro_maxpages			= physical_op_maxpages,
>> 	.ro_displayname			= "physical",
>> };
>> diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
>> index 6ab1d03..7b51d9d 100644
>> --- a/net/sunrpc/xprtrdma/rpc_rdma.c
>> +++ b/net/sunrpc/xprtrdma/rpc_rdma.c
>> @@ -284,11 +284,8 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
>> 	return (unsigned char *)iptr - (unsigned char *)headerp;
>> 
>> out:
>> -	if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_FRMR) {
>> -		for (pos = 0; nchunks--;)
>> -			pos += rpcrdma_deregister_external(
>> -					&req->rl_segments[pos], r_xprt);
>> -	}
>> +	if (r_xprt->rx_ia.ri_memreg_strategy != RPCRDMA_FRMR)
>> +		r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt, req, nchunks);
>> 	return n;
>> }
>> 
>> diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
>> index 9a9da40..c484671 100644
>> --- a/net/sunrpc/xprtrdma/transport.c
>> +++ b/net/sunrpc/xprtrdma/transport.c
>> @@ -571,7 +571,6 @@ xprt_rdma_free(void *buffer)
>> 	struct rpcrdma_req *req;
>> 	struct rpcrdma_xprt *r_xprt;
>> 	struct rpcrdma_regbuf *rb;
>> -	int i;
>> 
>> 	if (buffer == NULL)
>> 		return;
>> @@ -582,11 +581,8 @@ xprt_rdma_free(void *buffer)
>> 
>> 	dprintk("RPC:       %s: called on 0x%p\n", __func__, req->rl_reply);
>> 
>> -	for (i = 0; req->rl_nchunks;) {
>> -		--req->rl_nchunks;
>> -		i += rpcrdma_deregister_external(
>> -			&req->rl_segments[i], r_xprt);
>> -	}
>> +	r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt, req, req->rl_nchunks);
>> +	req->rl_nchunks = 0;
>> 
>> 	rpcrdma_buffer_put(req);
>> }
>> diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
>> index 851ed97..a1621fd 100644
>> --- a/net/sunrpc/xprtrdma/verbs.c
>> +++ b/net/sunrpc/xprtrdma/verbs.c
>> @@ -1509,7 +1509,7 @@ rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
>> 	}
>> }
>> 
>> -/* rpcrdma_unmap_one() was already done by rpcrdma_deregister_frmr_external().
>> +/* rpcrdma_unmap_one() was already done during deregistration.
>>  * Redo only the ib_post_send().
>>  */
>> static void
>> @@ -1889,85 +1889,6 @@ rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg)
>> 				seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
>> }
>> 
>> -static int
>> -rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
>> -			struct rpcrdma_ia *ia, struct rpcrdma_xprt *r_xprt)
>> -{
>> -	struct rpcrdma_mr_seg *seg1 = seg;
>> -	struct ib_send_wr invalidate_wr, *bad_wr;
>> -	int rc;
>> -
>> -	seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
>> -
>> -	memset(&invalidate_wr, 0, sizeof invalidate_wr);
>> -	invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
>> -	invalidate_wr.opcode = IB_WR_LOCAL_INV;
>> -	invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
>> -	DECR_CQCOUNT(&r_xprt->rx_ep);
>> -
>> -	read_lock(&ia->ri_qplock);
>> -	while (seg1->mr_nsegs--)
>> -		rpcrdma_unmap_one(ia, seg++);
>> -	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
>> -	read_unlock(&ia->ri_qplock);
>> -	if (rc) {
>> -		/* Force rpcrdma_buffer_get() to retry */
>> -		seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
>> -		dprintk("RPC:       %s: failed ib_post_send for invalidate,"
>> -			" status %i\n", __func__, rc);
>> -	}
>> -	return rc;
>> -}
>> -
>> -static int
>> -rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
>> -			struct rpcrdma_ia *ia)
>> -{
>> -	struct rpcrdma_mr_seg *seg1 = seg;
>> -	LIST_HEAD(l);
>> -	int rc;
>> -
>> -	list_add(&seg1->rl_mw->r.fmr->list, &l);
>> -	rc = ib_unmap_fmr(&l);
>> -	read_lock(&ia->ri_qplock);
>> -	while (seg1->mr_nsegs--)
>> -		rpcrdma_unmap_one(ia, seg++);
>> -	read_unlock(&ia->ri_qplock);
>> -	if (rc)
>> -		dprintk("RPC:       %s: failed ib_unmap_fmr,"
>> -			" status %i\n", __func__, rc);
>> -	return rc;
>> -}
>> -
>> -int
>> -rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
>> -		struct rpcrdma_xprt *r_xprt)
>> -{
>> -	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
>> -	int nsegs = seg->mr_nsegs, rc;
>> -
>> -	switch (ia->ri_memreg_strategy) {
>> -
>> -	case RPCRDMA_ALLPHYSICAL:
>> -		read_lock(&ia->ri_qplock);
>> -		rpcrdma_unmap_one(ia, seg);
>> -		read_unlock(&ia->ri_qplock);
>> -		break;
>> -
>> -	case RPCRDMA_FRMR:
>> -		rc = rpcrdma_deregister_frmr_external(seg, ia, r_xprt);
>> -		break;
>> -
>> -	case RPCRDMA_MTHCAFMR:
>> -		rc = rpcrdma_deregister_fmr_external(seg, ia);
>> -		break;
>> -
>> -	default:
>> -		break;
>> -	}
>> -	return nsegs;
>> -}
>> -
>> /*
>>  * Prepost any receive buffer, then post send.
>>  *
>> diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
>> index 7bf077b..3aabbb2 100644
>> --- a/net/sunrpc/xprtrdma/xprt_rdma.h
>> +++ b/net/sunrpc/xprtrdma/xprt_rdma.h
>> @@ -338,6 +338,8 @@ struct rpcrdma_xprt;
>> struct rpcrdma_memreg_ops {
>> 	int		(*ro_map)(struct rpcrdma_xprt *,
>> 				  struct rpcrdma_mr_seg *, int, bool);
>> +	void		(*ro_unmap)(struct rpcrdma_xprt *,
>> +				    struct rpcrdma_req *, unsigned int);
>> 	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
>> 	const char	*ro_displayname;
>> };
>> @@ -405,9 +407,6 @@ void rpcrdma_buffer_put(struct rpcrdma_req *);
>> void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
>> void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
>> 
>> -int rpcrdma_deregister_external(struct rpcrdma_mr_seg *,
>> -				struct rpcrdma_xprt *);
>> -
>> struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
>> 					    size_t, gfp_t);
>> void rpcrdma_free_regbuf(struct rpcrdma_ia *,
>> 
>> --
>> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
>> the body of a message to majordomo@vger.kernel.org
>> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>> 
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

--
Chuck Lever
chuck[dot]lever[at]oracle[dot]com
