* [PATCH 0/2] RPCRDMA Fixes
@ 2011-02-09 19:45 ` Tom Tucker
  0 siblings, 0 replies; 6+ messages in thread
From: Tom Tucker @ 2011-02-09 19:45 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: spelic, linux-rdma, linux-nfs

This pair of patches fixes a problem with the marshalling of XDR into
RPCRDMA messages and an issue with FRMR mapping in the presence of
transport errors. The problems were discovered together while looking
into the ENOSPC failures reported by spelic@shiftmail.com.

The fixes, however, are independent and do not rely on each other. I
have tested them independently and together on 64-bit with both
InfiniBand and iWARP, and compile-tested them on 32-bit.

---

Tom Tucker (2):
      RPCRDMA: Fix FRMR registration/invalidate handling.
      RPCRDMA: Fix to XDR page base interpretation in marshalling logic.


 net/sunrpc/xprtrdma/rpc_rdma.c  |   86 +++++++++++++++++++--------------------
 net/sunrpc/xprtrdma/verbs.c     |   52 ++++++++++++++++++++----
 net/sunrpc/xprtrdma/xprt_rdma.h |    1 +
 3 files changed, 87 insertions(+), 52 deletions(-)

-- 
Signed-off-by: Tom Tucker <tom@ogc.us>

* [PATCH 1/2] RPCRDMA: Fix to XDR page base interpretation in marshalling logic.
@ 2011-02-09 19:45     ` Tom Tucker
  0 siblings, 0 replies; 6+ messages in thread
From: Tom Tucker @ 2011-02-09 19:45 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: spelic, linux-rdma, linux-nfs

The RPCRDMA marshalling logic assumed that xdr->page_base was an
offset into the first page of xdr->page_list. It is in fact an
offset into the xdr->page_list as a whole: it selects both the
starting page within the page_list and the offset into that page.
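
As a concrete illustration (a sketch for this note only, not part of
the patch, and assuming the common 4096-byte PAGE_SIZE): a page_base
of 5000 selects pages[1] at offset 904, which is exactly the
decomposition the corrected code performs:

	/* example values only; assumes PAGE_SIZE == 4096 */
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	/* page_base 5000: ppages == &xdrbuf->pages[1], offset == 904.
	 * The first segment maps PAGE_SIZE - 904 bytes; every later
	 * page is mapped from offset 0. */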

The symptom depended in part on the rpc_memreg_strategy: if it was
FRMR or some other one-shot mapping mode, the connection would get
torn down on a base-and-bounds error. When the badly marshalled RPC
was retransmitted, the client would reconnect, hit the error, and
tear down the connection again, looping forever and leaving the
mount hung. For the other modes, the bug caused silent data
corruption. It is most easily reproduced by writing more data than
the filesystem has space for.

This fix corrects the page_base assumption and otherwise simplifies
the iov mapping logic.

Signed-off-by: Tom Tucker <tom@ogc.us>
---

 net/sunrpc/xprtrdma/rpc_rdma.c |   86 ++++++++++++++++++++--------------------
 1 files changed, 42 insertions(+), 44 deletions(-)

diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 2ac3f6e..554d081 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -87,6 +87,8 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
 	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
 {
 	int len, n = 0, p;
+	int page_base;
+	struct page **ppages;
 
 	if (pos == 0 && xdrbuf->head[0].iov_len) {
 		seg[n].mr_page = NULL;
@@ -95,34 +97,32 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
 		++n;
 	}
 
-	if (xdrbuf->page_len && (xdrbuf->pages[0] != NULL)) {
-		if (n == nsegs)
-			return 0;
-		seg[n].mr_page = xdrbuf->pages[0];
-		seg[n].mr_offset = (void *)(unsigned long) xdrbuf->page_base;
-		seg[n].mr_len = min_t(u32,
-			PAGE_SIZE - xdrbuf->page_base, xdrbuf->page_len);
-		len = xdrbuf->page_len - seg[n].mr_len;
+	len = xdrbuf->page_len;
+	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
+	page_base = xdrbuf->page_base & ~PAGE_MASK;
+	p = 0;
+	while (len && n < nsegs) {
+		seg[n].mr_page = ppages[p];
+		seg[n].mr_offset = (void *)(unsigned long) page_base;
+		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
+		BUG_ON(seg[n].mr_len > PAGE_SIZE);
+		len -= seg[n].mr_len;
 		++n;
-		p = 1;
-		while (len > 0) {
-			if (n == nsegs)
-				return 0;
-			seg[n].mr_page = xdrbuf->pages[p];
-			seg[n].mr_offset = NULL;
-			seg[n].mr_len = min_t(u32, PAGE_SIZE, len);
-			len -= seg[n].mr_len;
-			++n;
-			++p;
-		}
+		++p;
+		page_base = 0;	/* page offset only applies to first page */
 	}
 
+	/* Message overflows the seg array */
+	if (len && n == nsegs)
+		return 0;
+
 	if (xdrbuf->tail[0].iov_len) {
 		/* the rpcrdma protocol allows us to omit any trailing
 		 * xdr pad bytes, saving the server an RDMA operation. */
 		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
 			return n;
 		if (n == nsegs)
+			/* Tail remains, but we're out of segments */
 			return 0;
 		seg[n].mr_page = NULL;
 		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
@@ -296,6 +296,8 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
 	int copy_len;
 	unsigned char *srcp, *destp;
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
+	int page_base;
+	struct page **ppages;
 
 	destp = rqst->rq_svec[0].iov_base;
 	curlen = rqst->rq_svec[0].iov_len;
@@ -324,28 +326,25 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
 			__func__, destp + copy_len, curlen);
 		rqst->rq_svec[0].iov_len += curlen;
 	}
-
 	r_xprt->rx_stats.pullup_copy_count += copy_len;
-	npages = PAGE_ALIGN(rqst->rq_snd_buf.page_base+copy_len) >> PAGE_SHIFT;
+
+	page_base = rqst->rq_snd_buf.page_base;
+	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
+	page_base &= ~PAGE_MASK;
+	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
 	for (i = 0; copy_len && i < npages; i++) {
-		if (i == 0)
-			curlen = PAGE_SIZE - rqst->rq_snd_buf.page_base;
-		else
-			curlen = PAGE_SIZE;
+		curlen = PAGE_SIZE - page_base;
 		if (curlen > copy_len)
 			curlen = copy_len;
 		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
 			__func__, i, destp, copy_len, curlen);
-		srcp = kmap_atomic(rqst->rq_snd_buf.pages[i],
-					KM_SKB_SUNRPC_DATA);
-		if (i == 0)
-			memcpy(destp, srcp+rqst->rq_snd_buf.page_base, curlen);
-		else
-			memcpy(destp, srcp, curlen);
+		srcp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
+		memcpy(destp, srcp+page_base, curlen);
 		kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
 		rqst->rq_svec[0].iov_len += curlen;
 		destp += curlen;
 		copy_len -= curlen;
+		page_base = 0;
 	}
 	/* header now contains entire send message */
 	return pad;
@@ -606,6 +605,8 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 {
 	int i, npages, curlen, olen;
 	char *destp;
+	struct page **ppages;
+	int page_base;
 
 	curlen = rqst->rq_rcv_buf.head[0].iov_len;
 	if (curlen > copy_len) {	/* write chunk header fixup */
@@ -624,32 +625,29 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 	olen = copy_len;
 	i = 0;
 	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
+	page_base = rqst->rq_rcv_buf.page_base;
+	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
+	page_base &= ~PAGE_MASK;
+
 	if (copy_len && rqst->rq_rcv_buf.page_len) {
-		npages = PAGE_ALIGN(rqst->rq_rcv_buf.page_base +
+		npages = PAGE_ALIGN(page_base +
 			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
 		for (; i < npages; i++) {
-			if (i == 0)
-				curlen = PAGE_SIZE - rqst->rq_rcv_buf.page_base;
-			else
-				curlen = PAGE_SIZE;
+			curlen = PAGE_SIZE - page_base;
 			if (curlen > copy_len)
 				curlen = copy_len;
 			dprintk("RPC:       %s: page %d"
 				" srcp 0x%p len %d curlen %d\n",
 				__func__, i, srcp, copy_len, curlen);
-			destp = kmap_atomic(rqst->rq_rcv_buf.pages[i],
-						KM_SKB_SUNRPC_DATA);
-			if (i == 0)
-				memcpy(destp + rqst->rq_rcv_buf.page_base,
-						srcp, curlen);
-			else
-				memcpy(destp, srcp, curlen);
-			flush_dcache_page(rqst->rq_rcv_buf.pages[i]);
+			destp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
+			memcpy(destp + page_base, srcp, curlen);
+			flush_dcache_page(ppages[i]);
 			kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
 			srcp += curlen;
 			copy_len -= curlen;
 			if (copy_len == 0)
 				break;
+			page_base = 0;
 		}
 		rqst->rq_rcv_buf.page_len = olen - copy_len;
 	} else


* [PATCH 2/2] RPCRDMA: Fix FRMR registration/invalidate handling.
@ 2011-02-09 19:45     ` Tom Tucker
  0 siblings, 0 replies; 6+ messages in thread
From: Tom Tucker @ 2011-02-09 19:45 UTC (permalink / raw)
  To: Trond.Myklebust; +Cc: spelic, linux-rdma, linux-nfs

When the rpc_memreg_strategy is 5, FRMRs are used to map RPC data.
This mode uses an FRMR to map the RPC data, then invalidates
(i.e. unregisters) the data in xprt_rdma_free. These FRMRs are reused
across connections on the same mount, i.e. if the connection goes
away on an idle timeout and reconnects later, the FRMRs are not
destroyed and recreated.

This creates a problem on transport errors, because the WRs that
invalidate an FRMR may be flushed (i.e. fail), leaving the FRMR
valid. When that FRMR is later used to map an RPC, the mapping fails,
tearing down the transport and starting over. Over time, more and
more of the FRMR pool ends up in the wrong state, resulting in
seemingly random disconnects.

This fix tracks the FRMR state explicitly, setting it based on the
successful completion of a registration or invalidate WR. If an FRMR
is ever found in the wrong state when it is about to be used, an
invalidate WR is prepended, re-syncing the FRMR state and avoiding
the connection loss.
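
Condensed, the state machine in the patch below works like this (a
restatement of the diff for clarity, not additional code):

	/* completion upcall records the FRMR's true state */
	case IB_WC_FAST_REG_MR:
		frmr->r.frmr.state = FRMR_IS_VALID;
		break;
	case IB_WC_LOCAL_INV:
		frmr->r.frmr.state = FRMR_IS_INVALID;
		break;

	/* registration: if a flushed invalidate left the FRMR valid,
	 * chain a LOCAL_INV WR ahead of the FAST_REG_MR WR */
	if (frmr->r.frmr.state == FRMR_IS_VALID) {
		invalidate_wr.next = &frmr_wr;
		post_wr = &invalidate_wr;
	} else
		post_wr = &frmr_wr;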

Signed-off-by: Tom Tucker <tom@ogc.us>
---

 net/sunrpc/xprtrdma/verbs.c     |   52 +++++++++++++++++++++++++++++++++------
 net/sunrpc/xprtrdma/xprt_rdma.h |    1 +
 2 files changed, 45 insertions(+), 8 deletions(-)

diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 5f4c7b3..570f08d 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -144,6 +144,7 @@ rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
 static inline
 void rpcrdma_event_process(struct ib_wc *wc)
 {
+	struct rpcrdma_mw *frmr;
 	struct rpcrdma_rep *rep =
 			(struct rpcrdma_rep *)(unsigned long) wc->wr_id;
 
@@ -154,15 +155,23 @@ void rpcrdma_event_process(struct ib_wc *wc)
 		return;
 
 	if (IB_WC_SUCCESS != wc->status) {
-		dprintk("RPC:       %s: %s WC status %X, connection lost\n",
-			__func__, (wc->opcode & IB_WC_RECV) ? "recv" : "send",
-			 wc->status);
+		dprintk("RPC:       %s: WC opcode %d status %X, connection lost\n",
+			__func__, wc->opcode, wc->status);
 		rep->rr_len = ~0U;
-		rpcrdma_schedule_tasklet(rep);
+		if (wc->opcode != IB_WC_FAST_REG_MR && wc->opcode != IB_WC_LOCAL_INV)
+			rpcrdma_schedule_tasklet(rep);
 		return;
 	}
 
 	switch (wc->opcode) {
+	case IB_WC_FAST_REG_MR:
+		frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
+		frmr->r.frmr.state = FRMR_IS_VALID;
+		break;
+	case IB_WC_LOCAL_INV:
+		frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
+		frmr->r.frmr.state = FRMR_IS_INVALID;
+		break;
 	case IB_WC_RECV:
 		rep->rr_len = wc->byte_len;
 		ib_dma_sync_single_for_cpu(
@@ -1450,6 +1459,11 @@ rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, int writing)
 		seg->mr_dma = ib_dma_map_single(ia->ri_id->device,
 				seg->mr_offset,
 				seg->mr_dmalen, seg->mr_dir);
+	if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) {
+		dprintk("RPC:       %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n",
+			__func__,
+			seg->mr_dma, seg->mr_offset, seg->mr_dmalen);
+	}
 }
 
 static void
@@ -1469,7 +1483,8 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 			struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_mr_seg *seg1 = seg;
-	struct ib_send_wr frmr_wr, *bad_wr;
+	struct ib_send_wr invalidate_wr, frmr_wr, *bad_wr, *post_wr;
+
 	u8 key;
 	int len, pageoff;
 	int i, rc;
@@ -1484,6 +1499,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 		rpcrdma_map_one(ia, seg, writing);
 		seg1->mr_chunk.rl_mw->r.frmr.fr_pgl->page_list[i] = seg->mr_dma;
 		len += seg->mr_len;
+		BUG_ON(seg->mr_len > PAGE_SIZE);
 		++seg;
 		++i;
 		/* Check for holes */
@@ -1494,26 +1510,45 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 	dprintk("RPC:       %s: Using frmr %p to map %d segments\n",
 		__func__, seg1->mr_chunk.rl_mw, i);
 
+	if (unlikely(seg1->mr_chunk.rl_mw->r.frmr.state == FRMR_IS_VALID)) {
+		dprintk("RPC:       %s: frmr %x left valid, posting invalidate.\n",
+			__func__,
+			seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey);
+		/* Invalidate before using. */
+		memset(&invalidate_wr, 0, sizeof invalidate_wr);
+		invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
+		invalidate_wr.next = &frmr_wr;
+		invalidate_wr.opcode = IB_WR_LOCAL_INV;
+		invalidate_wr.send_flags = IB_SEND_SIGNALED;
+		invalidate_wr.ex.invalidate_rkey =
+			seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
+		DECR_CQCOUNT(&r_xprt->rx_ep);
+		post_wr = &invalidate_wr;
+	} else
+		post_wr = &frmr_wr;
+
 	/* Bump the key */
 	key = (u8)(seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey & 0x000000FF);
 	ib_update_fast_reg_key(seg1->mr_chunk.rl_mw->r.frmr.fr_mr, ++key);
 
 	/* Prepare FRMR WR */
 	memset(&frmr_wr, 0, sizeof frmr_wr);
+	frmr_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
 	frmr_wr.opcode = IB_WR_FAST_REG_MR;
-	frmr_wr.send_flags = 0;			/* unsignaled */
+	frmr_wr.send_flags = IB_SEND_SIGNALED;
 	frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
 	frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
 	frmr_wr.wr.fast_reg.page_list_len = i;
 	frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 	frmr_wr.wr.fast_reg.length = i << PAGE_SHIFT;
+	BUG_ON(frmr_wr.wr.fast_reg.length < len);
 	frmr_wr.wr.fast_reg.access_flags = (writing ?
 				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
 				IB_ACCESS_REMOTE_READ);
 	frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
 	DECR_CQCOUNT(&r_xprt->rx_ep);
 
-	rc = ib_post_send(ia->ri_id->qp, &frmr_wr, &bad_wr);
+	rc = ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
 
 	if (rc) {
 		dprintk("RPC:       %s: failed ib_post_send for register,"
@@ -1542,8 +1577,9 @@ rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
 		rpcrdma_unmap_one(ia, seg++);
 
 	memset(&invalidate_wr, 0, sizeof invalidate_wr);
+	invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
 	invalidate_wr.opcode = IB_WR_LOCAL_INV;
-	invalidate_wr.send_flags = 0;			/* unsignaled */
+	invalidate_wr.send_flags = IB_SEND_SIGNALED;
 	invalidate_wr.ex.invalidate_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
 	DECR_CQCOUNT(&r_xprt->rx_ep);
 
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index c7a7eba..cae761a 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -164,6 +164,7 @@ struct rpcrdma_mr_seg {		/* chunk descriptors */
 				struct {
 					struct ib_fast_reg_page_list *fr_pgl;
 					struct ib_mr *fr_mr;
+					enum { FRMR_IS_INVALID, FRMR_IS_VALID  } state;
 				} frmr;
 			} r;
 			struct list_head mw_list;

