From: Bob Pearson <rpearsonhpe@gmail.com>
To: linux-rdma@vger.kernel.org, zyjzyj2000@gmail.com
Cc: Bob Pearson <rpearson@hpe.com>
Subject: [PATCH v3 12/17] rdma_rxe: Added bind mw API stub
Date: Thu, 20 Aug 2020 17:46:33 -0500
Message-ID: <20200820224638.3212-13-rpearson@hpe.com>
In-Reply-To: <20200820224638.3212-1-rpearson@hpe.com>

In rxe_comp.c
Added a mapping from IB_WR_BIND_MW to IB_WC_BIND_MW so that bind
work requests report the correct opcode in their work completions.

In rxe_opcode.c and rxe_opcode.h
Added the bind MW WR opcode.
Replaced WR_REG_MASK with the existing WR_LOCAL_MASK, which covers
local wqe operations generally, and dropped the unused WR_REG_MASK
bit.

In rxe_req.c
Added a local bind MW operation.
Changed the error returns so that each failure path sets its own
work completion status.
Fixed a bug which caused rxe_comp to not report bind errors in WCs.
Noted a couple more unrelated bugs for later fix up.

In rxe_mw.c
Added a stub for rxe_bind_mw() which currently fails with -EINVAL.

In rxe_verbs.c
Updated init_send_wqe() to test WR_LOCAL_MASK.
Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
 drivers/infiniband/sw/rxe/rxe_comp.c   |  1 +
 drivers/infiniband/sw/rxe/rxe_loc.h    |  1 +
 drivers/infiniband/sw/rxe/rxe_mw.c     |  6 ++
 drivers/infiniband/sw/rxe/rxe_opcode.c | 11 ++-
 drivers/infiniband/sw/rxe/rxe_opcode.h |  1 -
 drivers/infiniband/sw/rxe/rxe_req.c    | 92 +++++++++++++++++++-------
 drivers/infiniband/sw/rxe/rxe_verbs.c  |  2 +-
 7 files changed, 85 insertions(+), 29 deletions(-)
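
For reviewers who want to see how this path gets exercised, below is a
rough userspace sketch (not part of this patch) of binding a type 2 MW
through the standard rdma-core verbs API once rxe_bind_mw() is filled
in by a later patch in this series. The helper name bind_type2_mw and
its parameters are illustrative only. Until the real implementation
lands, rxe completes such a WR with IB_WC_MW_BIND_ERR because the stub
returns -EINVAL.

#include <infiniband/verbs.h>

/* Rough sketch only: allocate a type 2 memory window, bind it over a
 * region of an existing MR, and post the bind as a signaled work
 * request. 'mr' must have been registered with IBV_ACCESS_MW_BIND.
 */
static int bind_type2_mw(struct ibv_qp *qp, struct ibv_pd *pd,
			 struct ibv_mr *mr, uint64_t addr, uint64_t len)
{
	struct ibv_send_wr wr = {};
	struct ibv_send_wr *bad_wr;
	struct ibv_mw *mw;

	mw = ibv_alloc_mw(pd, IBV_MW_TYPE_2);
	if (!mw)
		return -1;

	wr.opcode = IBV_WR_BIND_MW;
	wr.send_flags = IBV_SEND_SIGNALED;
	wr.bind_mw.mw = mw;
	/* produce a new rkey for this bind */
	wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey);
	wr.bind_mw.bind_info.mr = mr;
	wr.bind_mw.bind_info.addr = addr;
	wr.bind_mw.bind_info.length = len;
	wr.bind_mw.bind_info.mw_access_flags = IBV_ACCESS_REMOTE_WRITE;

	/* the bind completes through the send CQ like any other
	 * signaled WR, which is why rxe_comp.c needs the
	 * IB_WC_BIND_MW mapping added above
	 */
	return ibv_post_send(qp, &wr, &bad_wr);
}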

diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 54fc55487bc0..c0fd1bad8c55 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -105,6 +105,7 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
 	case IB_WR_RDMA_READ_WITH_INV:		return IB_WC_RDMA_READ;
 	case IB_WR_LOCAL_INV:			return IB_WC_LOCAL_INV;
 	case IB_WR_REG_MR:			return IB_WC_REG_MR;
+	case IB_WR_BIND_MW:			return IB_WC_BIND_MW;
 
 	default:
 		return 0xff;
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index cbed269edfe7..18ae0eb11fa8 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -115,6 +115,7 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
 struct ib_mw *rxe_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type,
 			   struct ib_udata *udata);
 int rxe_dealloc_mw(struct ib_mw *ibmw);
+int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
 
 /* rxe_net.c */
 void rxe_loopback(struct sk_buff *skb);
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index ea8510044fbe..b461aed98c0c 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -67,3 +67,9 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
 
 	return 0;
 }
+
+int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+	pr_err_once("%s: not implemented\n", __func__);
+	return -EINVAL;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index ddfc08c14893..0a34075ef25a 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -89,13 +89,20 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
 	[IB_WR_LOCAL_INV]				= {
 		.name	= "IB_WR_LOCAL_INV",
 		.mask	= {
-			[IB_QPT_RC]	= WR_REG_MASK,
+			[IB_QPT_RC]	= WR_LOCAL_MASK,
 		},
 	},
 	[IB_WR_REG_MR]					= {
 		.name	= "IB_WR_REG_MR",
 		.mask	= {
-			[IB_QPT_RC]	= WR_REG_MASK,
+			[IB_QPT_RC]	= WR_LOCAL_MASK,
+		},
+	},
+	[IB_WR_BIND_MW]					= {
+		.name	= "IB_WR_BIND_MW",
+		.mask	= {
+			[IB_QPT_RC]	= WR_LOCAL_MASK,
+			[IB_QPT_UC]	= WR_LOCAL_MASK,
 		},
 	},
 };
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h
index 59e8b3875826..4775453409d9 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.h
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.h
@@ -22,7 +22,6 @@ enum rxe_wr_mask {
 	WR_READ_MASK			= BIT(3),
 	WR_WRITE_MASK			= BIT(4),
 	WR_LOCAL_MASK			= BIT(5),
-	WR_REG_MASK			= BIT(6),
 
 	WR_READ_OR_WRITE_MASK		= WR_READ_MASK | WR_WRITE_MASK,
 	WR_READ_WRITE_OR_SEND_MASK	= WR_READ_OR_WRITE_MASK | WR_SEND_MASK,
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index ffc8f65b2ad7..46550a6fd6f8 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -561,6 +561,8 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 int rxe_requester(void *arg)
 {
 	struct rxe_qp *qp = (struct rxe_qp *)arg;
+	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+	struct rxe_mr *mr;
 	struct rxe_pkt_info pkt;
 	struct sk_buff *skb;
 	struct rxe_send_wqe *wqe;
@@ -596,42 +598,55 @@ int rxe_requester(void *arg)
 	if (unlikely(!wqe))
 		goto exit;
 
-	if (wqe->mask & WR_REG_MASK) {
-		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
-			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
-			struct rxe_mr *rmr;
-
-			rmr = rxe_pool_get_index(&rxe->mr_pool,
-						 wqe->wr.ex.invalidate_rkey >> 8);
-			if (!rmr) {
+	if (wqe->mask & WR_LOCAL_MASK) {
+		switch (wqe->wr.opcode) {
+		case IB_WR_LOCAL_INV:
+			mr = rxe_pool_get_index(&rxe->mr_pool,
+					wqe->wr.ex.invalidate_rkey >> 8);
+			if (!mr) {
 				pr_err("No mr for key %#x\n",
 				       wqe->wr.ex.invalidate_rkey);
 				wqe->state = wqe_state_error;
 				wqe->status = IB_WC_MW_BIND_ERR;
+				/* TODO this should be goto err */
 				goto exit;
 			}
-			rmr->state = RXE_MEM_STATE_FREE;
-			rxe_drop_ref(rmr);
+			mr->state = RXE_MEM_STATE_FREE;
+			rxe_drop_ref(mr);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
-		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
-			struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
-
-			rmr->state = RXE_MEM_STATE_VALID;
-			rmr->access = wqe->wr.wr.reg.access;
-			rmr->lkey = wqe->wr.wr.reg.key;
-			rmr->rkey = wqe->wr.wr.reg.key;
-			rmr->iova = wqe->wr.wr.reg.mr->iova;
+			break;
+		case IB_WR_REG_MR:
+			mr = to_rmr(wqe->wr.wr.reg.mr);
+			mr->state = RXE_MEM_STATE_VALID;
+			mr->access = wqe->wr.wr.reg.access;
+			mr->lkey = wqe->wr.wr.reg.key;
+			mr->rkey = wqe->wr.wr.reg.key;
+			mr->iova = wqe->wr.wr.reg.mr->iova;
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
-		} else {
+			break;
+		case IB_WR_BIND_MW:
+			ret = rxe_bind_mw(qp, wqe);
+			if (ret) {
+				wqe->state = wqe_state_done;
+				wqe->status = IB_WC_MW_BIND_ERR;
+				goto err;
+			}
+			wqe->state = wqe_state_done;
+			wqe->status = IB_WC_SUCCESS;
+			break;
+		default:
+			pr_err_once("unexpected LOCAL WR opcode = %d\n",
+					wqe->wr.opcode);
 			goto exit;
 		}
+		qp->req.wqe_index = next_index(qp->sq.queue,
+						qp->req.wqe_index);
+
 		if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
 		    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
 			rxe_run_task(&qp->comp.task, 1);
-		qp->req.wqe_index = next_index(qp->sq.queue,
-						qp->req.wqe_index);
 		goto next_wqe;
 	}
 
@@ -651,6 +666,7 @@ int rxe_requester(void *arg)
 	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
 	if (unlikely(opcode < 0)) {
 		wqe->status = IB_WC_LOC_QP_OP_ERR;
+		/* TODO this should be goto err */
 		goto exit;
 	}
 
@@ -680,8 +696,7 @@ int rxe_requester(void *arg)
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
 			__rxe_do_task(&qp->comp.task);
-			rxe_drop_ref(qp);
-			return 0;
+			goto again;
 		}
 		payload = mtu;
 	}
@@ -689,12 +704,14 @@ int rxe_requester(void *arg)
 	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
 	if (unlikely(!skb)) {
 		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
+		wqe->status = IB_WC_LOC_PROT_ERR;
 		goto err;
 	}
 
 	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
 		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
 		kfree_skb(skb);
+		wqe->status = IB_WC_LOC_PROT_ERR;
 		goto err;
 	}
 
@@ -718,6 +735,7 @@ int rxe_requester(void *arg)
 			goto exit;
 		}
 
+		wqe->status = IB_WC_LOC_PROT_ERR;
 		goto err;
 	}
 
@@ -726,11 +744,35 @@ int rxe_requester(void *arg)
 	goto next_wqe;
 
 err:
-	wqe->status = IB_WC_LOC_PROT_ERR;
+	/* we come here if an error occurred while processing
+	 * a send wqe. The completer will put the qp in error
+	 * state and no more wqes will be processed unless
+	 * the qp is cleaned up and restarted. We do not want
+	 * to be called again
+	 */
 	wqe->state = wqe_state_error;
 	__rxe_do_task(&qp->comp.task);
+	ret = -EAGAIN;
+	goto done;
 
 exit:
+	/* we come here if either there are no more wqes in the send
+	 * queue or we are blocked waiting for some resource or event.
+	 * The current wqe will be restarted or new wqe started when
+	 * there is work to do or we can complete the current wqe.
+	 */
+	ret = -EAGAIN;
+	goto done;
+
+again:
+	/* we come here if we are done with the current wqe but want to
+	 * get called again. Mostly we loop back to next wqe so should
+	 * be all one way or the other
+	 */
+	ret = 0;
+	goto done;
+
+done:
 	rxe_drop_ref(qp);
-	return -EAGAIN;
+	return ret;
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index c0db1e318dab..d1630a2134da 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -566,7 +566,7 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	    qp_type(qp) == IB_QPT_GSI)
 		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
 
-	if (mask & WR_REG_MASK) {
+	if (mask & WR_LOCAL_MASK) {
 		wqe->mask = mask;
 		wqe->state = wqe_state_posted;
 		return 0;
-- 
2.25.1

