From: Bob Pearson <rpearsonhpe@gmail.com>
To: jgg@nvidia.com, zyjzyj2000@gmail.com, monis@mellanox.com,
	linux-rdma@vger.kernel.org
Cc: Bob Pearson <rpearsonhpe@gmail.com>
Subject: [PATCH for-next v9 06/10] RDMA/rxe: Move local ops to subroutine
Date: Mon,  7 Jun 2021 23:25:49 -0500
Message-ID: <20210608042552.33275-7-rpearsonhpe@gmail.com>
In-Reply-To: <20210608042552.33275-1-rpearsonhpe@gmail.com>

Simplify rxe_requester() by moving the local operations to a subroutine.
Add an error return for an illegal send WR opcode. Move the next_index
call ahead of rxe_run_task(); this fixes a small bug where work
completions were delayed until after the next wqe, which was not the
intended behavior. Let each error set its own WC status; previously all
errors were reported as protection errors, which was incorrect. Route
errors from rxe_do_local_ops() to the err: label, which causes an
immediate completion; without this an error on the last WR may be lost.
Rename fill_packet() to finish_packet(), which is more accurate.
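
As an illustration of the new status mapping (a sketch only; the patch
open-codes this at the finish_packet() call site, and the helper name
here is hypothetical):

	static enum ib_wc_status req_err_to_wc_status(int err)
	{
		/* -EFAULT from finish_packet() indicates a memory
		 * access fault, i.e. a protection error; all other
		 * errors are treated as local QP operation errors.
		 */
		return (err == -EFAULT) ? IB_WC_LOC_PROT_ERR :
					  IB_WC_LOC_QP_OP_ERR;
	}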

Fixes: 8700e2e7c485 ("The software RoCE driver")
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
v9:
  Fixed long-standing incorrect WC status values and error returns.
---
 drivers/infiniband/sw/rxe/rxe_req.c | 103 +++++++++++++++++-----------
 1 file changed, 63 insertions(+), 40 deletions(-)
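
For reviewers unfamiliar with the rxe key layout: the IB_WR_LOCAL_INV
hunk below looks the MR up by shifting the rkey right by 8 bits,
since rxe derives the pool index from the upper bits of the key. A
minimal sketch of that lookup, assuming only the calls visible in the
diff:

	/* The low 8 bits of an rkey are the key byte; the remaining
	 * bits are the pool index, so the MR backing an rkey is found
	 * by a shifted pool lookup. A NULL result means a stale or
	 * bogus rkey.
	 */
	mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
	if (!mr)
		return -EINVAL;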

diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 889c7fea6f18..80872ec54219 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -462,7 +462,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 	return skb;
 }
 
-static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 		       struct rxe_pkt_info *pkt, struct sk_buff *skb,
 		       int paylen)
 {
@@ -578,6 +578,54 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 			  jiffies + qp->qp_timeout_jiffies);
 }
 
+static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+	u8 opcode = wqe->wr.opcode;
+	struct rxe_dev *rxe;
+	struct rxe_mr *mr;
+	u32 rkey;
+
+	switch (opcode) {
+	case IB_WR_LOCAL_INV:
+		rxe = to_rdev(qp->ibqp.device);
+		rkey = wqe->wr.ex.invalidate_rkey;
+		mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+		if (!mr) {
+			pr_err("No MR for rkey %#x\n", rkey);
+			wqe->status = IB_WC_LOC_QP_OP_ERR;
+			return -EINVAL;
+		}
+		mr->state = RXE_MR_STATE_FREE;
+		rxe_drop_ref(mr);
+		break;
+	case IB_WR_REG_MR:
+		mr = to_rmr(wqe->wr.wr.reg.mr);
+
+		rxe_add_ref(mr);
+		mr->state = RXE_MR_STATE_VALID;
+		mr->access = wqe->wr.wr.reg.access;
+		mr->ibmr.lkey = wqe->wr.wr.reg.key;
+		mr->ibmr.rkey = wqe->wr.wr.reg.key;
+		mr->iova = wqe->wr.wr.reg.mr->iova;
+		rxe_drop_ref(mr);
+		break;
+	default:
+		pr_err("Unexpected send wqe opcode %d\n", opcode);
+		wqe->status = IB_WC_LOC_QP_OP_ERR;
+		return -EINVAL;
+	}
+
+	wqe->state = wqe_state_done;
+	wqe->status = IB_WC_SUCCESS;
+	qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+
+	if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+	    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
+		rxe_run_task(&qp->comp.task, 1);
+
+	return 0;
+}
+
 int rxe_requester(void *arg)
 {
 	struct rxe_qp *qp = (struct rxe_qp *)arg;
@@ -618,42 +666,11 @@ int rxe_requester(void *arg)
 		goto exit;
 
 	if (wqe->mask & WR_LOCAL_OP_MASK) {
-		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
-			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
-			struct rxe_mr *rmr;
-
-			rmr = rxe_pool_get_index(&rxe->mr_pool,
-						 wqe->wr.ex.invalidate_rkey >> 8);
-			if (!rmr) {
-				pr_err("No mr for key %#x\n",
-				       wqe->wr.ex.invalidate_rkey);
-				wqe->state = wqe_state_error;
-				wqe->status = IB_WC_MW_BIND_ERR;
-				goto exit;
-			}
-			rmr->state = RXE_MR_STATE_FREE;
-			rxe_drop_ref(rmr);
-			wqe->state = wqe_state_done;
-			wqe->status = IB_WC_SUCCESS;
-		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
-			struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
-
-			rmr->state = RXE_MR_STATE_VALID;
-			rmr->access = wqe->wr.wr.reg.access;
-			rmr->ibmr.lkey = wqe->wr.wr.reg.key;
-			rmr->ibmr.rkey = wqe->wr.wr.reg.key;
-			rmr->iova = wqe->wr.wr.reg.mr->iova;
-			wqe->state = wqe_state_done;
-			wqe->status = IB_WC_SUCCESS;
-		} else {
-			goto exit;
-		}
-		if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-		    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
-			rxe_run_task(&qp->comp.task, 1);
-		qp->req.wqe_index = next_index(qp->sq.queue,
-						qp->req.wqe_index);
-		goto next_wqe;
+		ret = rxe_do_local_ops(qp, wqe);
+		if (unlikely(ret))
+			goto err;
+		else
+			goto next_wqe;
 	}
 
 	if (unlikely(qp_type(qp) == IB_QPT_RC &&
@@ -711,11 +728,17 @@ int rxe_requester(void *arg)
 	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
 	if (unlikely(!skb)) {
 		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
+		wqe->status = IB_WC_LOC_QP_OP_ERR;
 		goto err;
 	}
 
-	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
-		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
+	ret = finish_packet(qp, wqe, &pkt, skb, payload);
+	if (unlikely(ret)) {
+		pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
+		if (ret == -EFAULT)
+			wqe->status = IB_WC_LOC_PROT_ERR;
+		else
+			wqe->status = IB_WC_LOC_QP_OP_ERR;
 		kfree_skb(skb);
 		goto err;
 	}
@@ -740,6 +763,7 @@ int rxe_requester(void *arg)
 			goto exit;
 		}
 
+		wqe->status = IB_WC_LOC_QP_OP_ERR;
 		goto err;
 	}
 
@@ -748,7 +772,6 @@ int rxe_requester(void *arg)
 	goto next_wqe;
 
 err:
-	wqe->status = IB_WC_LOC_PROT_ERR;
 	wqe->state = wqe_state_error;
 	__rxe_do_task(&qp->comp.task);
 
-- 
2.30.2
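
A note on the completion signaling in rxe_do_local_ops() above: the
completion task is scheduled only when the verbs signaling rules call
for a CQE. A minimal sketch of that check, assuming the fields shown
in the diff (the helper name is hypothetical):

	static bool wr_needs_completion(struct rxe_qp *qp,
					struct rxe_send_wqe *wqe)
	{
		/* A CQE is due if this WR was posted with
		 * IB_SEND_SIGNALED, or if the QP was created to
		 * signal all send WRs.
		 */
		return (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
		       qp->sq_sig_type == IB_SIGNAL_ALL_WR;
	}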


Thread overview: 13+ messages
2021-06-08  4:25 [PATCH for-next v9 00/10] RDMA/rxe: Implement memory windows Bob Pearson
2021-06-08  4:25 ` [PATCH for-next v9 1/9] RDMA/rxe: Add bind MW fields to rxe_send_wr Bob Pearson
2021-06-08  4:25 ` [PATCH for-next v9 02/10] RDMA/rxe: Return errors for add index and key Bob Pearson
2021-06-08  4:25 ` [PATCH for-next v9 03/10] RDMA/rxe: Enable MW object pool Bob Pearson
2021-06-08  4:25 ` [PATCH for-next v9 04/10] RDMA/rxe: Add ib_alloc_mw and ib_dealloc_mw verbs Bob Pearson
2021-06-08  4:25 ` [PATCH for-next v9 05/10] RDMA/rxe: Replace WR_REG_MASK by WR_LOCAL_OP_MASK Bob Pearson
2021-06-08  4:25 ` [PATCH for-next v9 06/10] RDMA/rxe: Move local ops to subroutine Bob Pearson [this message]
2021-06-08  4:25 ` [PATCH for-next v9 07/10] RDMA/rxe: Add support for bind MW work requests Bob Pearson
2021-06-08  4:25 ` [PATCH for-next v9 08/10] RDMA/rxe: Implement invalidate MW operations Bob Pearson
2021-06-08  4:25 ` [PATCH for-next v9 09/10] RDMA/rxe: Implement memory access through MWs Bob Pearson
2021-06-08  4:25 ` [PATCH for-next v9 10/10] RDMA/rxe: Disallow MR dereg and invalidate when bound Bob Pearson
2021-06-18 13:19 ` [PATCH for-next v9 00/10] RDMA/rxe: Implement memory windows Jason Gunthorpe
2021-06-18 15:39   ` Bob Pearson
