* [PATCH for-next 01/13] RDMA/rxe: Replace START->FIRST, END->LAST
@ 2022-09-17  3:10 Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 02/13] RDMA/rxe: Move next_opcode() to rxe_opcode.c Bob Pearson
                   ` (7 more replies)
  0 siblings, 8 replies; 10+ messages in thread
From: Bob Pearson @ 2022-09-17  3:10 UTC
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Replace RXE_START_MASK with RXE_FIRST_MASK and RXE_END_MASK with
RXE_LAST_MASK, and add RXE_ONLY_MASK = RXE_FIRST_MASK | RXE_LAST_MASK,
to match normal IBA usage.
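
As a standalone illustration (userspace sketch, not driver code; the
NUM_HDR_TYPES value below is an assumed placeholder), a packet whose
opcode carries RXE_ONLY_MASK now tests true for both the FIRST and
LAST checks used in the hunks below:

    /* userspace sketch; NUM_HDR_TYPES is an assumed placeholder */
    #include <stdio.h>

    #define BIT(n)        (1u << (n))
    #define NUM_HDR_TYPES 12

    enum {
            RXE_FIRST_MASK  = BIT(NUM_HDR_TYPES + 8),
            RXE_MIDDLE_MASK = BIT(NUM_HDR_TYPES + 9),
            RXE_LAST_MASK   = BIT(NUM_HDR_TYPES + 10),
            RXE_ONLY_MASK   = RXE_FIRST_MASK | RXE_LAST_MASK,
    };

    int main(void)
    {
            unsigned int mask = RXE_ONLY_MASK; /* e.g. an RC SEND_ONLY */

            printf("first=%d last=%d\n",
                   !!(mask & RXE_FIRST_MASK), !!(mask & RXE_LAST_MASK));
            return 0; /* prints first=1 last=1 */
    }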

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_comp.c   |   6 +-
 drivers/infiniband/sw/rxe/rxe_net.c    |   2 +-
 drivers/infiniband/sw/rxe/rxe_opcode.c | 143 +++++++++++--------------
 drivers/infiniband/sw/rxe/rxe_opcode.h |   5 +-
 drivers/infiniband/sw/rxe/rxe_req.c    |  10 +-
 drivers/infiniband/sw/rxe/rxe_resp.c   |   4 +-
 6 files changed, 76 insertions(+), 94 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index fb0c008af78c..1f10ae4a35d5 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -221,7 +221,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
 	switch (qp->comp.opcode) {
 	case -1:
 		/* Will catch all *_ONLY cases. */
-		if (!(mask & RXE_START_MASK))
+		if (!(mask & RXE_FIRST_MASK))
 			return COMPST_ERROR;
 
 		break;
@@ -354,7 +354,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
 		return COMPST_ERROR;
 	}
 
-	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
+	if (wqe->dma.resid == 0 && (pkt->mask & RXE_LAST_MASK))
 		return COMPST_COMP_ACK;
 
 	return COMPST_UPDATE_COMP;
@@ -636,7 +636,7 @@ int rxe_completer(void *arg)
 			break;
 
 		case COMPST_UPDATE_COMP:
-			if (pkt->mask & RXE_END_MASK)
+			if (pkt->mask & RXE_LAST_MASK)
 				qp->comp.opcode = -1;
 			else
 				qp->comp.opcode = pkt->opcode;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c53f4529f098..d46190ad082f 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -428,7 +428,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	}
 
 	if ((qp_type(qp) != IB_QPT_RC) &&
-	    (pkt->mask & RXE_END_MASK)) {
+	    (pkt->mask & RXE_LAST_MASK)) {
 		pkt->wqe->state = wqe_state_done;
 		rxe_run_task(&qp->comp.task, 1);
 	}
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index d4ba4d506f17..0ea587c15931 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -107,7 +107,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_SEND_FIRST]			= {
 		.name	= "IB_OPCODE_RC_SEND_FIRST",
 		.mask	= RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
-			  RXE_SEND_MASK | RXE_START_MASK,
+			  RXE_SEND_MASK | RXE_FIRST_MASK,
 		.length = RXE_BTH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -127,7 +127,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_SEND_LAST]			= {
 		.name	= "IB_OPCODE_RC_SEND_LAST",
 		.mask	= RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
-			  RXE_SEND_MASK | RXE_END_MASK,
+			  RXE_SEND_MASK | RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -137,7 +137,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE]		= {
 		.name	= "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE",
 		.mask	= RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-			  RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+			  RXE_COMP_MASK | RXE_SEND_MASK | RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -149,8 +149,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_SEND_ONLY]			= {
 		.name	= "IB_OPCODE_RC_SEND_ONLY",
 		.mask	= RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
-			  RXE_RWR_MASK | RXE_SEND_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_RWR_MASK | RXE_SEND_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -161,7 +160,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		.name	= "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE",
 		.mask	= RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
 			  RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -173,7 +172,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_RDMA_WRITE_FIRST]		= {
 		.name	= "IB_OPCODE_RC_RDMA_WRITE_FIRST",
 		.mask	= RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-			  RXE_WRITE_MASK | RXE_START_MASK,
+			  RXE_WRITE_MASK | RXE_FIRST_MASK,
 		.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -195,7 +194,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_RDMA_WRITE_LAST]			= {
 		.name	= "IB_OPCODE_RC_RDMA_WRITE_LAST",
 		.mask	= RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
-			  RXE_END_MASK,
+			  RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -206,7 +205,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		.name	= "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
 		.mask	= RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
 			  RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
-			  RXE_END_MASK,
+			  RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -218,8 +217,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_RDMA_WRITE_ONLY]			= {
 		.name	= "IB_OPCODE_RC_RDMA_WRITE_ONLY",
 		.mask	= RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-			  RXE_WRITE_MASK | RXE_START_MASK |
-			  RXE_END_MASK,
+			  RXE_WRITE_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -231,9 +229,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE]		= {
 		.name	= "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
 		.mask	= RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
-			  RXE_REQ_MASK | RXE_WRITE_MASK |
-			  RXE_COMP_MASK | RXE_RWR_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_REQ_MASK | RXE_WRITE_MASK | RXE_COMP_MASK |
+			  RXE_RWR_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -248,7 +245,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_RDMA_READ_REQUEST]			= {
 		.name	= "IB_OPCODE_RC_RDMA_READ_REQUEST",
 		.mask	= RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -260,7 +257,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST]		= {
 		.name	= "IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST",
 		.mask	= RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
-			  RXE_START_MASK,
+			  RXE_FIRST_MASK,
 		.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -281,7 +278,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST]		= {
 		.name	= "IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST",
 		.mask	= RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
-			  RXE_END_MASK,
+			  RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -293,7 +290,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY]		= {
 		.name	= "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY",
 		.mask	= RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -304,8 +301,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	},
 	[IB_OPCODE_RC_ACKNOWLEDGE]			= {
 		.name	= "IB_OPCODE_RC_ACKNOWLEDGE",
-		.mask	= RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK |
-			  RXE_END_MASK,
+		.mask	= RXE_AETH_MASK | RXE_ACK_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -317,7 +313,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE]			= {
 		.name	= "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE",
 		.mask	= RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -332,7 +328,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_COMPARE_SWAP]			= {
 		.name	= "IB_OPCODE_RC_COMPARE_SWAP",
 		.mask	= RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -344,7 +340,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_FETCH_ADD]			= {
 		.name	= "IB_OPCODE_RC_FETCH_ADD",
 		.mask	= RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -356,7 +352,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE]		= {
 		.name	= "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE",
 		.mask	= RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-			  RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+			  RXE_COMP_MASK | RXE_SEND_MASK | RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -369,7 +365,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		.name	= "IB_OPCODE_RC_SEND_ONLY_INV",
 		.mask	= RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
 			  RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
-			  RXE_END_MASK  | RXE_START_MASK,
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -383,7 +379,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_UC_SEND_FIRST]			= {
 		.name	= "IB_OPCODE_UC_SEND_FIRST",
 		.mask	= RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
-			  RXE_SEND_MASK | RXE_START_MASK,
+			  RXE_SEND_MASK | RXE_FIRST_MASK,
 		.length = RXE_BTH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -403,7 +399,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_UC_SEND_LAST]			= {
 		.name	= "IB_OPCODE_UC_SEND_LAST",
 		.mask	= RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
-			  RXE_SEND_MASK | RXE_END_MASK,
+			  RXE_SEND_MASK | RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -413,7 +409,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE]		= {
 		.name	= "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE",
 		.mask	= RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-			  RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+			  RXE_COMP_MASK | RXE_SEND_MASK | RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -425,8 +421,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_UC_SEND_ONLY]			= {
 		.name	= "IB_OPCODE_UC_SEND_ONLY",
 		.mask	= RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
-			  RXE_RWR_MASK | RXE_SEND_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_RWR_MASK | RXE_SEND_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -437,7 +432,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		.name	= "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE",
 		.mask	= RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
 			  RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -449,7 +444,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_UC_RDMA_WRITE_FIRST]		= {
 		.name	= "IB_OPCODE_UC_RDMA_WRITE_FIRST",
 		.mask	= RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-			  RXE_WRITE_MASK | RXE_START_MASK,
+			  RXE_WRITE_MASK | RXE_FIRST_MASK,
 		.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -471,7 +466,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_UC_RDMA_WRITE_LAST]			= {
 		.name	= "IB_OPCODE_UC_RDMA_WRITE_LAST",
 		.mask	= RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
-			  RXE_END_MASK,
+			  RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -482,7 +477,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		.name	= "IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
 		.mask	= RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
 			  RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
-			  RXE_END_MASK,
+			  RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -494,8 +489,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_UC_RDMA_WRITE_ONLY]			= {
 		.name	= "IB_OPCODE_UC_RDMA_WRITE_ONLY",
 		.mask	= RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-			  RXE_WRITE_MASK | RXE_START_MASK |
-			  RXE_END_MASK,
+			  RXE_WRITE_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -507,9 +501,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE]		= {
 		.name	= "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
 		.mask	= RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
-			  RXE_REQ_MASK | RXE_WRITE_MASK |
-			  RXE_COMP_MASK | RXE_RWR_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_REQ_MASK | RXE_WRITE_MASK | RXE_COMP_MASK |
+			  RXE_RWR_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -527,7 +520,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		.name	= "IB_OPCODE_RD_SEND_FIRST",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
 			  RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
-			  RXE_START_MASK,
+			  RXE_FIRST_MASK,
 		.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -542,8 +535,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_SEND_MIDDLE]		= {
 		.name	= "IB_OPCODE_RD_SEND_MIDDLE",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
-			  RXE_REQ_MASK | RXE_SEND_MASK |
-			  RXE_MIDDLE_MASK,
+			  RXE_REQ_MASK | RXE_SEND_MASK | RXE_MIDDLE_MASK,
 		.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -559,7 +551,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		.name	= "IB_OPCODE_RD_SEND_LAST",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
 			  RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK |
-			  RXE_END_MASK,
+			  RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -574,9 +566,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE]		= {
 		.name	= "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
-			  RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-			  RXE_COMP_MASK | RXE_SEND_MASK |
-			  RXE_END_MASK,
+			  RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+			  RXE_SEND_MASK | RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
 			  RXE_RDETH_BYTES,
 		.offset = {
@@ -597,7 +588,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		.name	= "IB_OPCODE_RD_SEND_ONLY",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
 			  RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
-			  RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+			  RXE_SEND_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -612,9 +603,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE]		= {
 		.name	= "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
-			  RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-			  RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+			  RXE_RWR_MASK | RXE_SEND_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
 			  RXE_RDETH_BYTES,
 		.offset = {
@@ -634,8 +624,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_RDMA_WRITE_FIRST]		= {
 		.name	= "IB_OPCODE_RD_RDMA_WRITE_FIRST",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
-			  RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-			  RXE_WRITE_MASK | RXE_START_MASK,
+			  RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+			  RXE_FIRST_MASK,
 		.length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
 			  RXE_RDETH_BYTES,
 		.offset = {
@@ -655,8 +645,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_RDMA_WRITE_MIDDLE]		= {
 		.name	= "IB_OPCODE_RD_RDMA_WRITE_MIDDLE",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
-			  RXE_REQ_MASK | RXE_WRITE_MASK |
-			  RXE_MIDDLE_MASK,
+			  RXE_REQ_MASK | RXE_WRITE_MASK | RXE_MIDDLE_MASK,
 		.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -671,8 +660,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_RDMA_WRITE_LAST]			= {
 		.name	= "IB_OPCODE_RD_RDMA_WRITE_LAST",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
-			  RXE_REQ_MASK | RXE_WRITE_MASK |
-			  RXE_END_MASK,
+			  RXE_REQ_MASK | RXE_WRITE_MASK | RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -687,9 +675,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE]		= {
 		.name	= "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
-			  RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-			  RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
-			  RXE_END_MASK,
+			  RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+			  RXE_COMP_MASK | RXE_RWR_MASK | RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
 			  RXE_RDETH_BYTES,
 		.offset = {
@@ -709,9 +696,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_RDMA_WRITE_ONLY]			= {
 		.name	= "IB_OPCODE_RD_RDMA_WRITE_ONLY",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
-				RXE_PAYLOAD_MASK | RXE_REQ_MASK |
-				RXE_WRITE_MASK | RXE_START_MASK |
-				RXE_END_MASK,
+			  RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
 			  RXE_RDETH_BYTES,
 		.offset = {
@@ -731,10 +717,9 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE]		= {
 		.name	= "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
-			  RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
-			  RXE_REQ_MASK | RXE_WRITE_MASK |
-			  RXE_COMP_MASK | RXE_RWR_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+			  RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES +
 			  RXE_DETH_BYTES + RXE_RDETH_BYTES,
 		.offset = {
@@ -759,8 +744,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_RDMA_READ_REQUEST]			= {
 		.name	= "IB_OPCODE_RD_RDMA_READ_REQUEST",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
-			  RXE_REQ_MASK | RXE_READ_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_REQ_MASK | RXE_READ_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
 			  RXE_RDETH_BYTES,
 		.offset = {
@@ -779,9 +763,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	},
 	[IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST]		= {
 		.name	= "IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST",
-		.mask	= RXE_RDETH_MASK | RXE_AETH_MASK |
-			  RXE_PAYLOAD_MASK | RXE_ACK_MASK |
-			  RXE_START_MASK,
+		.mask	= RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
+			  RXE_ACK_MASK | RXE_FIRST_MASK,
 		.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -808,7 +791,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST]		= {
 		.name	= "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST",
 		.mask	= RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
-			  RXE_ACK_MASK | RXE_END_MASK,
+			  RXE_ACK_MASK | RXE_LAST_MASK,
 		.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -823,7 +806,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY]		= {
 		.name	= "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY",
 		.mask	= RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
-			  RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+			  RXE_ACK_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -838,7 +821,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_ACKNOWLEDGE]			= {
 		.name	= "IB_OPCODE_RD_ACKNOWLEDGE",
 		.mask	= RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -850,7 +833,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE]			= {
 		.name	= "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE",
 		.mask	= RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK |
-			  RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+			  RXE_ACK_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES +
 			  RXE_RDETH_BYTES,
 		.offset = {
@@ -866,8 +849,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_COMPARE_SWAP]			= {
 		.name	= "RD_COMPARE_SWAP",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
-			  RXE_REQ_MASK | RXE_ATOMIC_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_REQ_MASK | RXE_ATOMIC_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
 			  RXE_RDETH_BYTES,
 		.offset = {
@@ -887,8 +869,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	[IB_OPCODE_RD_FETCH_ADD]			= {
 		.name	= "IB_OPCODE_RD_FETCH_ADD",
 		.mask	= RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
-			  RXE_REQ_MASK | RXE_ATOMIC_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_REQ_MASK | RXE_ATOMIC_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
 			  RXE_RDETH_BYTES,
 		.offset = {
@@ -911,7 +892,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		.name	= "IB_OPCODE_UD_SEND_ONLY",
 		.mask	= RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
 			  RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
-			  RXE_START_MASK | RXE_END_MASK,
+			  RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_DETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
@@ -924,7 +905,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		.name	= "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE",
 		.mask	= RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
 			  RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
-			  RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+			  RXE_SEND_MASK | RXE_ONLY_MASK,
 		.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h
index 8f9aaaf260f2..d2b6a8232e92 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.h
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.h
@@ -75,9 +75,10 @@ enum rxe_hdr_mask {
 	RXE_RWR_MASK		= BIT(NUM_HDR_TYPES + 6),
 	RXE_COMP_MASK		= BIT(NUM_HDR_TYPES + 7),
 
-	RXE_START_MASK		= BIT(NUM_HDR_TYPES + 8),
+	RXE_FIRST_MASK		= BIT(NUM_HDR_TYPES + 8),
 	RXE_MIDDLE_MASK		= BIT(NUM_HDR_TYPES + 9),
-	RXE_END_MASK		= BIT(NUM_HDR_TYPES + 10),
+	RXE_LAST_MASK		= BIT(NUM_HDR_TYPES + 10),
+	RXE_ONLY_MASK		= RXE_FIRST_MASK | RXE_LAST_MASK,
 
 	RXE_LOOPBACK_MASK	= BIT(NUM_HDR_TYPES + 12),
 
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index f63771207970..e136abc802af 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -403,7 +403,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 
 	/* init bth */
 	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
-			(pkt->mask & RXE_END_MASK) &&
+			(pkt->mask & RXE_LAST_MASK) &&
 			((pkt->mask & (RXE_SEND_MASK)) ||
 			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
 			(RXE_WRITE_MASK | RXE_IMMDT_MASK));
@@ -411,7 +411,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
 					 qp->attr.dest_qp_num;
 
-	ack_req = ((pkt->mask & RXE_END_MASK) ||
+	ack_req = ((pkt->mask & RXE_LAST_MASK) ||
 		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
 	if (ack_req)
 		qp->req.noack_pkts = 0;
@@ -493,7 +493,7 @@ static void update_wqe_state(struct rxe_qp *qp,
 		struct rxe_send_wqe *wqe,
 		struct rxe_pkt_info *pkt)
 {
-	if (pkt->mask & RXE_END_MASK) {
+	if (pkt->mask & RXE_LAST_MASK) {
 		if (qp_type(qp) == IB_QPT_RC)
 			wqe->state = wqe_state_pending;
 	} else {
@@ -513,7 +513,7 @@ static void update_wqe_psn(struct rxe_qp *qp,
 	if (num_pkt == 0)
 		num_pkt = 1;
 
-	if (pkt->mask & RXE_START_MASK) {
+	if (pkt->mask & RXE_FIRST_MASK) {
 		wqe->first_psn = qp->req.psn;
 		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
 	}
@@ -550,7 +550,7 @@ static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
 {
 	qp->req.opcode = pkt->opcode;
 
-	if (pkt->mask & RXE_END_MASK)
+	if (pkt->mask & RXE_LAST_MASK)
 		qp->req.wqe_index = queue_next_index(qp->sq.queue,
 						     qp->req.wqe_index);
 
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 7c336db5cb54..cb560cbe418d 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -147,7 +147,7 @@ static enum resp_states check_psn(struct rxe_qp *qp,
 
 	case IB_QPT_UC:
 		if (qp->resp.drop_msg || diff != 0) {
-			if (pkt->mask & RXE_START_MASK) {
+			if (pkt->mask & RXE_FIRST_MASK) {
 				qp->resp.drop_msg = 0;
 				return RESPST_CHK_OP_SEQ;
 			}
@@ -901,7 +901,7 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
 			return RESPST_ERR_INVALIDATE_RKEY;
 	}
 
-	if (pkt->mask & RXE_END_MASK)
+	if (pkt->mask & RXE_LAST_MASK)
 		/* We successfully processed this new request. */
 		qp->resp.msn++;
 
-- 
2.34.1



* [PATCH for-next 02/13] RDMA/rxe: Move next_opcode() to rxe_opcode.c
  2022-09-17  3:10 [PATCH for-next 01/13] RDMA/rxe: Replace START->FIRST, END->LAST Bob Pearson
@ 2022-09-17  3:10 ` Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 03/13] RDMA: Add xrc opcodes to ib_pack.h Bob Pearson
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 10+ messages in thread
From: Bob Pearson @ 2022-09-17  3:10 UTC
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Move next_opcode() and its helpers from rxe_req.c to rxe_opcode.c,
declaring it in rxe_loc.h so that rxe_req.c can continue to call it.
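
For reference, a simplified userspace sketch of the selection logic
that next_opcode_rc() implements for a SEND work request (enum names
abbreviated here; the real function returns IB_OPCODE_RC_* constants):

    #include <stdio.h>

    enum { FIRST, MIDDLE, LAST, ONLY };

    /* prev is the opcode of the previous packet of this message (or
     * -1 at the start); fits is true when the remaining payload fits
     * in one MTU
     */
    static int next_send_rc(int prev, int fits)
    {
            if (prev == FIRST || prev == MIDDLE)
                    return fits ? LAST : MIDDLE;
            return fits ? ONLY : FIRST;
    }

    int main(void)
    {
            int prev = -1;

            for (int resid = 3; resid > 0; resid--) { /* 3-MTU message */
                    prev = next_send_rc(prev, resid <= 1);
                    printf("%d\n", prev); /* 0 FIRST, 1 MIDDLE, 2 LAST */
            }
            return 0;
    }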

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_loc.h    |   3 +
 drivers/infiniband/sw/rxe/rxe_opcode.c | 156 ++++++++++++++++++++++++-
 drivers/infiniband/sw/rxe/rxe_req.c    | 156 -------------------------
 3 files changed, 157 insertions(+), 158 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 22f6cc31d1d6..5526d83697c7 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -99,6 +99,9 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 		    struct sk_buff *skb);
 const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
 
+/* opcode.c */
+int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode);
+
 /* rxe_qp.c */
 int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index 0ea587c15931..6b1a1f197c4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -5,8 +5,8 @@
  */
 
 #include <rdma/ib_pack.h>
-#include "rxe_opcode.h"
-#include "rxe_hdr.h"
+
+#include "rxe.h"
 
 /* useful information about work request opcodes and pkt opcodes in
  * table form
@@ -919,3 +919,155 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	},
 
 };
+
+static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
+{
+	switch (opcode) {
+	case IB_WR_RDMA_WRITE:
+		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_RC_RDMA_WRITE_LAST :
+				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_RC_RDMA_WRITE_ONLY :
+				IB_OPCODE_RC_RDMA_WRITE_FIRST;
+
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_RC_RDMA_WRITE_FIRST;
+
+	case IB_WR_SEND:
+		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_RC_SEND_LAST :
+				IB_OPCODE_RC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_RC_SEND_ONLY :
+				IB_OPCODE_RC_SEND_FIRST;
+
+	case IB_WR_SEND_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_RC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_RC_SEND_FIRST;
+
+	case IB_WR_RDMA_READ:
+		return IB_OPCODE_RC_RDMA_READ_REQUEST;
+
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+		return IB_OPCODE_RC_COMPARE_SWAP;
+
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		return IB_OPCODE_RC_FETCH_ADD;
+
+	case IB_WR_SEND_WITH_INV:
+		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
+				IB_OPCODE_RC_SEND_MIDDLE;
+		else
+			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
+				IB_OPCODE_RC_SEND_FIRST;
+	case IB_WR_REG_MR:
+	case IB_WR_LOCAL_INV:
+		return opcode;
+	}
+
+	return -EINVAL;
+}
+
+static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
+{
+	switch (opcode) {
+	case IB_WR_RDMA_WRITE:
+		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_UC_RDMA_WRITE_LAST :
+				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_UC_RDMA_WRITE_ONLY :
+				IB_OPCODE_UC_RDMA_WRITE_FIRST;
+
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_UC_RDMA_WRITE_FIRST;
+
+	case IB_WR_SEND:
+		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_UC_SEND_LAST :
+				IB_OPCODE_UC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_UC_SEND_ONLY :
+				IB_OPCODE_UC_SEND_FIRST;
+
+	case IB_WR_SEND_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_UC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_UC_SEND_FIRST;
+	}
+
+	return -EINVAL;
+}
+
+int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode)
+{
+	int fits = (wqe->dma.resid <= qp->mtu);
+
+	switch (qp_type(qp)) {
+	case IB_QPT_RC:
+		return next_opcode_rc(qp, opcode, fits);
+
+	case IB_QPT_UC:
+		return next_opcode_uc(qp, opcode, fits);
+
+	case IB_QPT_UD:
+	case IB_QPT_GSI:
+		switch (opcode) {
+		case IB_WR_SEND:
+			return IB_OPCODE_UD_SEND_ONLY;
+
+		case IB_WR_SEND_WITH_IMM:
+			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index e136abc802af..d2a9abfed596 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -11,9 +11,6 @@
 #include "rxe_loc.h"
 #include "rxe_queue.h"
 
-static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-		       u32 opcode);
-
 static inline void retry_first_write_send(struct rxe_qp *qp,
 					  struct rxe_send_wqe *wqe, int npsn)
 {
@@ -194,159 +191,6 @@ static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
 }
 
-static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
-{
-	switch (opcode) {
-	case IB_WR_RDMA_WRITE:
-		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
-		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
-			return fits ?
-				IB_OPCODE_RC_RDMA_WRITE_LAST :
-				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_RC_RDMA_WRITE_ONLY :
-				IB_OPCODE_RC_RDMA_WRITE_FIRST;
-
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
-		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
-			return fits ?
-				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
-				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
-				IB_OPCODE_RC_RDMA_WRITE_FIRST;
-
-	case IB_WR_SEND:
-		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
-		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
-			return fits ?
-				IB_OPCODE_RC_SEND_LAST :
-				IB_OPCODE_RC_SEND_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_RC_SEND_ONLY :
-				IB_OPCODE_RC_SEND_FIRST;
-
-	case IB_WR_SEND_WITH_IMM:
-		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
-		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
-			return fits ?
-				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
-				IB_OPCODE_RC_SEND_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
-				IB_OPCODE_RC_SEND_FIRST;
-
-	case IB_WR_RDMA_READ:
-		return IB_OPCODE_RC_RDMA_READ_REQUEST;
-
-	case IB_WR_ATOMIC_CMP_AND_SWP:
-		return IB_OPCODE_RC_COMPARE_SWAP;
-
-	case IB_WR_ATOMIC_FETCH_AND_ADD:
-		return IB_OPCODE_RC_FETCH_ADD;
-
-	case IB_WR_SEND_WITH_INV:
-		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
-		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
-			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
-				IB_OPCODE_RC_SEND_MIDDLE;
-		else
-			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
-				IB_OPCODE_RC_SEND_FIRST;
-	case IB_WR_REG_MR:
-	case IB_WR_LOCAL_INV:
-		return opcode;
-	}
-
-	return -EINVAL;
-}
-
-static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
-{
-	switch (opcode) {
-	case IB_WR_RDMA_WRITE:
-		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
-		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
-			return fits ?
-				IB_OPCODE_UC_RDMA_WRITE_LAST :
-				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_UC_RDMA_WRITE_ONLY :
-				IB_OPCODE_UC_RDMA_WRITE_FIRST;
-
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
-		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
-			return fits ?
-				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
-				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
-				IB_OPCODE_UC_RDMA_WRITE_FIRST;
-
-	case IB_WR_SEND:
-		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
-		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
-			return fits ?
-				IB_OPCODE_UC_SEND_LAST :
-				IB_OPCODE_UC_SEND_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_UC_SEND_ONLY :
-				IB_OPCODE_UC_SEND_FIRST;
-
-	case IB_WR_SEND_WITH_IMM:
-		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
-		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
-			return fits ?
-				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
-				IB_OPCODE_UC_SEND_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
-				IB_OPCODE_UC_SEND_FIRST;
-	}
-
-	return -EINVAL;
-}
-
-static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-		       u32 opcode)
-{
-	int fits = (wqe->dma.resid <= qp->mtu);
-
-	switch (qp_type(qp)) {
-	case IB_QPT_RC:
-		return next_opcode_rc(qp, opcode, fits);
-
-	case IB_QPT_UC:
-		return next_opcode_uc(qp, opcode, fits);
-
-	case IB_QPT_UD:
-	case IB_QPT_GSI:
-		switch (opcode) {
-		case IB_WR_SEND:
-			return IB_OPCODE_UD_SEND_ONLY;
-
-		case IB_WR_SEND_WITH_IMM:
-			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
-		}
-		break;
-
-	default:
-		break;
-	}
-
-	return -EINVAL;
-}
-
 static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 {
 	int depth;
-- 
2.34.1



* [PATCH for-next 03/13] RDMA: Add xrc opcodes to ib_pack.h
  2022-09-17  3:10 [PATCH for-next 01/13] RDMA/rxe: Replace START->FIRST, END->LAST Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 02/13] RDMA/rxe: Move next_opcode() to rxe_opcode.c Bob Pearson
@ 2022-09-17  3:10 ` Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 04/13] RDMA/rxe: Extend opcodes and headers to support xrc Bob Pearson
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 10+ messages in thread
From: Bob Pearson @ 2022-09-17  3:10 UTC
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Extend ib_pack.h to include the XRC opcodes, along with masks for the
opcode type (transport) and command bits.
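
A small userspace sketch of how the new mask constants split a BTH
opcode into its transport (type) and command bits (constants copied
from the hunk below):

    #include <stdio.h>

    #define IB_OPCODE_XRC          0xa0
    #define IB_OPCODE_TYPE         0xe0 /* opcode type (transport) bits */
    #define IB_OPCODE_CMD          0x1f /* opcode command bits */
    #define IB_OPCODE_COMPARE_SWAP 0x13

    int main(void)
    {
            unsigned int opcode = IB_OPCODE_XRC + IB_OPCODE_COMPARE_SWAP;

            printf("type=%#x cmd=%#x\n",
                   opcode & IB_OPCODE_TYPE, opcode & IB_OPCODE_CMD);
            return 0; /* prints type=0xa0 cmd=0x13 */
    }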

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 include/rdma/ib_pack.h | 32 +++++++++++++++++++++++++++++++-
 1 file changed, 31 insertions(+), 1 deletion(-)

diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index a9162f25beaf..cc9aac05d38e 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -56,8 +56,11 @@ enum {
 	IB_OPCODE_UD                                = 0x60,
 	/* per IBTA 1.3 vol 1 Table 38, A10.3.2 */
 	IB_OPCODE_CNP                               = 0x80,
+	IB_OPCODE_XRC				    = 0xa0,
 	/* Manufacturer specific */
 	IB_OPCODE_MSP                               = 0xe0,
+	/* opcode type bits */
+	IB_OPCODE_TYPE				    = 0xe0,
 
 	/* operations -- just used to define real constants */
 	IB_OPCODE_SEND_FIRST                        = 0x00,
@@ -84,6 +87,8 @@ enum {
 	/* opcode 0x15 is reserved */
 	IB_OPCODE_SEND_LAST_WITH_INVALIDATE         = 0x16,
 	IB_OPCODE_SEND_ONLY_WITH_INVALIDATE         = 0x17,
+	/* opcode command bits */
+	IB_OPCODE_CMD				    = 0x1f,
 
 	/* real constants follow -- see comment about above IB_OPCODE()
 	   macro for more details */
@@ -152,7 +157,32 @@ enum {
 
 	/* UD */
 	IB_OPCODE(UD, SEND_ONLY),
-	IB_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE)
+	IB_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE),
+
+	/* XRC */
+	IB_OPCODE(XRC, SEND_FIRST),
+	IB_OPCODE(XRC, SEND_MIDDLE),
+	IB_OPCODE(XRC, SEND_LAST),
+	IB_OPCODE(XRC, SEND_LAST_WITH_IMMEDIATE),
+	IB_OPCODE(XRC, SEND_ONLY),
+	IB_OPCODE(XRC, SEND_ONLY_WITH_IMMEDIATE),
+	IB_OPCODE(XRC, RDMA_WRITE_FIRST),
+	IB_OPCODE(XRC, RDMA_WRITE_MIDDLE),
+	IB_OPCODE(XRC, RDMA_WRITE_LAST),
+	IB_OPCODE(XRC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
+	IB_OPCODE(XRC, RDMA_WRITE_ONLY),
+	IB_OPCODE(XRC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
+	IB_OPCODE(XRC, RDMA_READ_REQUEST),
+	IB_OPCODE(XRC, RDMA_READ_RESPONSE_FIRST),
+	IB_OPCODE(XRC, RDMA_READ_RESPONSE_MIDDLE),
+	IB_OPCODE(XRC, RDMA_READ_RESPONSE_LAST),
+	IB_OPCODE(XRC, RDMA_READ_RESPONSE_ONLY),
+	IB_OPCODE(XRC, ACKNOWLEDGE),
+	IB_OPCODE(XRC, ATOMIC_ACKNOWLEDGE),
+	IB_OPCODE(XRC, COMPARE_SWAP),
+	IB_OPCODE(XRC, FETCH_ADD),
+	IB_OPCODE(XRC, SEND_LAST_WITH_INVALIDATE),
+	IB_OPCODE(XRC, SEND_ONLY_WITH_INVALIDATE),
 };
 
 enum {
-- 
2.34.1



* [PATCH for-next 04/13] RDMA/rxe: Extend opcodes and headers to support xrc
  2022-09-17  3:10 [PATCH for-next 01/13] RDMA/rxe: Replace START->FIRST, END->LAST Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 02/13] RDMA/rxe: Move next_opcode() to rxe_opcode.c Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 03/13] RDMA: Add xrc opcodes to ib_pack.h Bob Pearson
@ 2022-09-17  3:10 ` Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 05/13] RDMA/rxe: Add xrc opcodes to next_opcode() Bob Pearson
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 10+ messages in thread
From: Bob Pearson @ 2022-09-17  3:10 UTC
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Extend rxe_hdr.h to include the XRCETH header, and extend the opcode
tables in rxe_opcode.c to support XRC operations and QP types.
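
A standalone sketch of the srqn accessors added below (userspace
stand-ins: htonl/ntohl replace cpu_to_be32/be32_to_cpu, and the
header-offset lookup through rxe_opcode[] is omitted):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h> /* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */

    struct rxe_xrceth {
            uint32_t srqn; /* big-endian on the wire */
    };

    #define XRCETH_SRQN_MASK (0x00ffffff)

    static void xrceth_set_srqn(struct rxe_xrceth *xrceth, uint32_t srqn)
    {
            xrceth->srqn = htonl(srqn & XRCETH_SRQN_MASK); /* 24-bit field */
    }

    static uint32_t xrceth_srqn(struct rxe_xrceth *xrceth)
    {
            return ntohl(xrceth->srqn);
    }

    int main(void)
    {
            struct rxe_xrceth eth;

            xrceth_set_srqn(&eth, 0x12345678); /* top byte masked off */
            printf("srqn=%#x\n", (unsigned int)xrceth_srqn(&eth));
            return 0; /* prints srqn=0x345678 */
    }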

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_hdr.h    |  36 +++
 drivers/infiniband/sw/rxe/rxe_opcode.c | 379 +++++++++++++++++++++++--
 drivers/infiniband/sw/rxe/rxe_opcode.h |   4 +-
 3 files changed, 395 insertions(+), 24 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
index e432f9e37795..e947bcf75209 100644
--- a/drivers/infiniband/sw/rxe/rxe_hdr.h
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -900,6 +900,41 @@ static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
 		rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
 }
 
+/******************************************************************************
+ * XRC Extended Transport Header
+ ******************************************************************************/
+struct rxe_xrceth {
+	__be32			srqn;
+};
+
+#define XRCETH_SRQN_MASK	(0x00ffffff)
+
+static inline u32 __xrceth_srqn(void *arg)
+{
+	struct rxe_xrceth *xrceth = arg;
+
+	return be32_to_cpu(xrceth->srqn);
+}
+
+static inline void __xrceth_set_srqn(void *arg, u32 srqn)
+{
+	struct rxe_xrceth *xrceth = arg;
+
+	xrceth->srqn = cpu_to_be32(srqn & XRCETH_SRQN_MASK);
+}
+
+static inline u32 xrceth_srqn(struct rxe_pkt_info *pkt)
+{
+	return __xrceth_srqn(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_XRCETH]);
+}
+
+static inline void xrceth_set_srqn(struct rxe_pkt_info *pkt, u32 srqn)
+{
+	__xrceth_set_srqn(pkt->hdr +
+		rxe_opcode[pkt->opcode].offset[RXE_XRCETH], srqn);
+}
+
 enum rxe_hdr_length {
 	RXE_BTH_BYTES		= sizeof(struct rxe_bth),
 	RXE_DETH_BYTES		= sizeof(struct rxe_deth),
@@ -909,6 +944,7 @@ enum rxe_hdr_length {
 	RXE_ATMACK_BYTES	= sizeof(struct rxe_atmack),
 	RXE_ATMETH_BYTES	= sizeof(struct rxe_atmeth),
 	RXE_IETH_BYTES		= sizeof(struct rxe_ieth),
+	RXE_XRCETH_BYTES	= sizeof(struct rxe_xrceth),
 	RXE_RDETH_BYTES		= sizeof(struct rxe_rdeth),
 };
 
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index 6b1a1f197c4d..4ae926a37ef8 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -15,51 +15,58 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
 	[IB_WR_RDMA_WRITE]				= {
 		.name	= "IB_WR_RDMA_WRITE",
 		.mask	= {
-			[IB_QPT_RC]	= WR_INLINE_MASK | WR_WRITE_MASK,
-			[IB_QPT_UC]	= WR_INLINE_MASK | WR_WRITE_MASK,
+			[IB_QPT_RC]		= WR_INLINE_MASK | WR_WRITE_MASK,
+			[IB_QPT_UC]		= WR_INLINE_MASK | WR_WRITE_MASK,
+			[IB_QPT_XRC_INI]	= WR_INLINE_MASK | WR_WRITE_MASK,
 		},
 	},
 	[IB_WR_RDMA_WRITE_WITH_IMM]			= {
 		.name	= "IB_WR_RDMA_WRITE_WITH_IMM",
 		.mask	= {
-			[IB_QPT_RC]	= WR_INLINE_MASK | WR_WRITE_MASK,
-			[IB_QPT_UC]	= WR_INLINE_MASK | WR_WRITE_MASK,
+			[IB_QPT_RC]		= WR_INLINE_MASK | WR_WRITE_MASK,
+			[IB_QPT_UC]		= WR_INLINE_MASK | WR_WRITE_MASK,
+			[IB_QPT_XRC_INI]	= WR_INLINE_MASK | WR_WRITE_MASK,
 		},
 	},
 	[IB_WR_SEND]					= {
 		.name	= "IB_WR_SEND",
 		.mask	= {
-			[IB_QPT_GSI]	= WR_INLINE_MASK | WR_SEND_MASK,
-			[IB_QPT_RC]	= WR_INLINE_MASK | WR_SEND_MASK,
-			[IB_QPT_UC]	= WR_INLINE_MASK | WR_SEND_MASK,
-			[IB_QPT_UD]	= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_GSI]		= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_RC]		= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_UC]		= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_UD]		= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_XRC_INI]	= WR_INLINE_MASK | WR_SEND_MASK,
 		},
 	},
 	[IB_WR_SEND_WITH_IMM]				= {
 		.name	= "IB_WR_SEND_WITH_IMM",
 		.mask	= {
-			[IB_QPT_GSI]	= WR_INLINE_MASK | WR_SEND_MASK,
-			[IB_QPT_RC]	= WR_INLINE_MASK | WR_SEND_MASK,
-			[IB_QPT_UC]	= WR_INLINE_MASK | WR_SEND_MASK,
-			[IB_QPT_UD]	= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_GSI]		= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_RC]		= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_UC]		= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_UD]		= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_XRC_INI]	= WR_INLINE_MASK | WR_SEND_MASK,
 		},
 	},
 	[IB_WR_RDMA_READ]				= {
 		.name	= "IB_WR_RDMA_READ",
 		.mask	= {
-			[IB_QPT_RC]	= WR_READ_MASK,
+			[IB_QPT_RC]		= WR_READ_MASK,
+			[IB_QPT_XRC_INI]	= WR_READ_MASK,
 		},
 	},
 	[IB_WR_ATOMIC_CMP_AND_SWP]			= {
 		.name	= "IB_WR_ATOMIC_CMP_AND_SWP",
 		.mask	= {
-			[IB_QPT_RC]	= WR_ATOMIC_MASK,
+			[IB_QPT_RC]		= WR_ATOMIC_MASK,
+			[IB_QPT_XRC_INI]	= WR_ATOMIC_MASK,
 		},
 	},
 	[IB_WR_ATOMIC_FETCH_AND_ADD]			= {
 		.name	= "IB_WR_ATOMIC_FETCH_AND_ADD",
 		.mask	= {
-			[IB_QPT_RC]	= WR_ATOMIC_MASK,
+			[IB_QPT_RC]		= WR_ATOMIC_MASK,
+			[IB_QPT_XRC_INI]	= WR_ATOMIC_MASK,
 		},
 	},
 	[IB_WR_LSO]					= {
@@ -71,34 +78,39 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
 	[IB_WR_SEND_WITH_INV]				= {
 		.name	= "IB_WR_SEND_WITH_INV",
 		.mask	= {
-			[IB_QPT_RC]	= WR_INLINE_MASK | WR_SEND_MASK,
-			[IB_QPT_UC]	= WR_INLINE_MASK | WR_SEND_MASK,
-			[IB_QPT_UD]	= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_RC]		= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_UC]		= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_UD]		= WR_INLINE_MASK | WR_SEND_MASK,
+			[IB_QPT_XRC_INI]	= WR_INLINE_MASK | WR_SEND_MASK,
 		},
 	},
 	[IB_WR_RDMA_READ_WITH_INV]			= {
 		.name	= "IB_WR_RDMA_READ_WITH_INV",
 		.mask	= {
-			[IB_QPT_RC]	= WR_READ_MASK,
+			[IB_QPT_RC]		= WR_READ_MASK,
+			[IB_QPT_XRC_INI]	= WR_READ_MASK,
 		},
 	},
 	[IB_WR_LOCAL_INV]				= {
 		.name	= "IB_WR_LOCAL_INV",
 		.mask	= {
-			[IB_QPT_RC]	= WR_LOCAL_OP_MASK,
+			[IB_QPT_RC]		= WR_LOCAL_OP_MASK,
+			[IB_QPT_XRC_INI]	= WR_LOCAL_OP_MASK,
 		},
 	},
 	[IB_WR_REG_MR]					= {
 		.name	= "IB_WR_REG_MR",
 		.mask	= {
-			[IB_QPT_RC]	= WR_LOCAL_OP_MASK,
+			[IB_QPT_RC]		= WR_LOCAL_OP_MASK,
+			[IB_QPT_XRC_INI]	= WR_LOCAL_OP_MASK,
 		},
 	},
 	[IB_WR_BIND_MW]					= {
 		.name	= "IB_WR_BIND_MW",
 		.mask	= {
-			[IB_QPT_RC]	= WR_LOCAL_OP_MASK,
-			[IB_QPT_UC]	= WR_LOCAL_OP_MASK,
+			[IB_QPT_RC]		= WR_LOCAL_OP_MASK,
+			[IB_QPT_UC]		= WR_LOCAL_OP_MASK,
+			[IB_QPT_XRC_INI]	= WR_LOCAL_OP_MASK,
 		},
 	},
 };
@@ -918,6 +930,327 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		}
 	},
 
+	/* XRC */
+	[IB_OPCODE_XRC_SEND_FIRST]			= {
+		.name	= "IB_OPCODE_XRC_SEND_FIRST",
+		.mask	= RXE_XRCETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+			  RXE_RWR_MASK | RXE_SEND_MASK | RXE_FIRST_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_SEND_MIDDLE]		= {
+		.name	= "IB_OPCODE_XRC_SEND_MIDDLE",
+		.mask	= RXE_XRCETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+			  RXE_SEND_MASK | RXE_MIDDLE_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_SEND_LAST]			= {
+		.name	= "IB_OPCODE_XRC_SEND_LAST",
+		.mask	= RXE_XRCETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+			  RXE_COMP_MASK | RXE_SEND_MASK | RXE_LAST_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_SEND_LAST_WITH_IMMEDIATE]		= {
+		.name	= "IB_OPCODE_XRC_SEND_LAST_WITH_IMMEDIATE",
+		.mask	= RXE_XRCETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+			  RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK |
+			  RXE_LAST_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES + RXE_IMMDT_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_IMMDT]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_IMMDT_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_SEND_ONLY]			= {
+		.name	= "IB_OPCODE_XRC_SEND_ONLY",
+		.mask	= RXE_XRCETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+			  RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+			  RXE_ONLY_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_SEND_ONLY_WITH_IMMEDIATE]		= {
+		.name	= "IB_OPCODE_XRC_SEND_ONLY_WITH_IMMEDIATE",
+		.mask	= RXE_XRCETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+			  RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+			  RXE_SEND_MASK | RXE_ONLY_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES + RXE_IMMDT_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_IMMDT]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_IMMDT_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_RDMA_WRITE_FIRST]		= {
+		.name	= "IB_OPCODE_XRC_RDMA_WRITE_FIRST",
+		.mask	= RXE_XRCETH_MASK | RXE_RETH_MASK | RXE_PAYLOAD_MASK |
+			  RXE_REQ_MASK | RXE_WRITE_MASK | RXE_FIRST_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES + RXE_RETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_RETH]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_RETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_RDMA_WRITE_MIDDLE]		= {
+		.name	= "IB_OPCODE_XRC_RDMA_WRITE_MIDDLE",
+		.mask	= RXE_XRCETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+			  RXE_WRITE_MASK | RXE_MIDDLE_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_RDMA_WRITE_LAST]			= {
+		.name	= "IB_OPCODE_XRC_RDMA_WRITE_LAST",
+		.mask	= RXE_XRCETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+			  RXE_WRITE_MASK | RXE_LAST_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_RDMA_WRITE_LAST_WITH_IMMEDIATE]		= {
+		.name	= "IB_OPCODE_XRC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
+		.mask	= RXE_XRCETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+			  RXE_REQ_MASK | RXE_WRITE_MASK | RXE_COMP_MASK |
+			  RXE_RWR_MASK | RXE_LAST_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES + RXE_IMMDT_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_IMMDT]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_IMMDT_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_RDMA_WRITE_ONLY]			= {
+		.name	= "IB_OPCODE_XRC_RDMA_WRITE_ONLY",
+		.mask	= RXE_XRCETH_MASK | RXE_RETH_MASK | RXE_PAYLOAD_MASK |
+			  RXE_REQ_MASK | RXE_WRITE_MASK | RXE_ONLY_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES + RXE_RETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_RETH]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_RETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_RDMA_WRITE_ONLY_WITH_IMMEDIATE]		= {
+		.name	= "IB_OPCODE_XRC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
+		.mask	= RXE_XRCETH_MASK | RXE_RETH_MASK | RXE_IMMDT_MASK |
+			  RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+			  RXE_COMP_MASK | RXE_RWR_MASK | RXE_ONLY_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES + RXE_IMMDT_BYTES +
+			  RXE_RETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_RETH]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+			[RXE_IMMDT]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_RETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_RETH_BYTES +
+					  RXE_IMMDT_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_RDMA_READ_REQUEST]			= {
+		.name	= "IB_OPCODE_XRC_RDMA_READ_REQUEST",
+		.mask	= RXE_XRCETH_MASK | RXE_RETH_MASK | RXE_REQ_MASK |
+			  RXE_READ_MASK | RXE_ONLY_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES + RXE_RETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_RETH]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_RETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_RDMA_READ_RESPONSE_FIRST]		= {
+		.name	= "IB_OPCODE_XRC_RDMA_READ_RESPONSE_FIRST",
+		.mask	= RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+			  RXE_FIRST_MASK,
+		.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_AETH]	= RXE_BTH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_AETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_RDMA_READ_RESPONSE_MIDDLE]		= {
+		.name	= "IB_OPCODE_XRC_RDMA_READ_RESPONSE_MIDDLE",
+		.mask	= RXE_PAYLOAD_MASK | RXE_ACK_MASK | RXE_MIDDLE_MASK,
+		.length = RXE_BTH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_RDMA_READ_RESPONSE_LAST]		= {
+		.name	= "IB_OPCODE_XRC_RDMA_READ_RESPONSE_LAST",
+		.mask	= RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+			  RXE_LAST_MASK,
+		.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_AETH]	= RXE_BTH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_AETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_RDMA_READ_RESPONSE_ONLY]		= {
+		.name	= "IB_OPCODE_XRC_RDMA_READ_RESPONSE_ONLY",
+		.mask	= RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+			  RXE_ONLY_MASK,
+		.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_AETH]	= RXE_BTH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_AETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_ACKNOWLEDGE]			= {
+		.name	= "IB_OPCODE_XRC_ACKNOWLEDGE",
+		.mask	= RXE_AETH_MASK | RXE_ACK_MASK | RXE_ONLY_MASK,
+		.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_AETH]	= RXE_BTH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_AETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_ATOMIC_ACKNOWLEDGE]			= {
+		.name	= "IB_OPCODE_XRC_ATOMIC_ACKNOWLEDGE",
+		.mask	= RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK |
+			  RXE_ONLY_MASK,
+		.length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_AETH]	= RXE_BTH_BYTES,
+			[RXE_ATMACK]	= RXE_BTH_BYTES +
+					  RXE_AETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_AETH_BYTES +
+					  RXE_ATMACK_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_COMPARE_SWAP]			= {
+		.name	= "IB_OPCODE_XRC_COMPARE_SWAP",
+		.mask	= RXE_XRCETH_MASK | RXE_ATMETH_MASK | RXE_REQ_MASK |
+			  RXE_ATOMIC_MASK | RXE_ONLY_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES + RXE_ATMETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_ATMETH]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_ATMETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_FETCH_ADD]			= {
+		.name	= "IB_OPCODE_XRC_FETCH_ADD",
+		.mask	= RXE_XRCETH_MASK | RXE_ATMETH_MASK | RXE_REQ_MASK |
+			  RXE_ATOMIC_MASK | RXE_ONLY_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES + RXE_ATMETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_ATMETH]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_ATMETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_SEND_LAST_WITH_INVALIDATE]		= {
+		.name	= "IB_OPCODE_XRC_SEND_LAST_WITH_INVALIDATE",
+		.mask	= RXE_XRCETH_MASK | RXE_IETH_MASK | RXE_PAYLOAD_MASK |
+			  RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK |
+			  RXE_LAST_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES + RXE_IETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_IETH]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_IETH_BYTES,
+		}
+	},
+	[IB_OPCODE_XRC_SEND_ONLY_WITH_INVALIDATE]		= {
+		.name	= "IB_OPCODE_XRC_SEND_ONLY_INV",
+		.mask	= RXE_XRCETH_MASK | RXE_IETH_MASK | RXE_PAYLOAD_MASK |
+			  RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+			  RXE_SEND_MASK | RXE_ONLY_MASK,
+		.length = RXE_BTH_BYTES + RXE_XRCETH_BYTES + RXE_IETH_BYTES,
+		.offset = {
+			[RXE_BTH]	= 0,
+			[RXE_XRCETH]	= RXE_BTH_BYTES,
+			[RXE_IETH]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES +
+					  RXE_XRCETH_BYTES +
+					  RXE_IETH_BYTES,
+		}
+	},
 };
 
 static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h
index d2b6a8232e92..5528a47f0266 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.h
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.h
@@ -30,7 +30,7 @@ enum rxe_wr_mask {
 
 struct rxe_wr_opcode_info {
 	char			*name;
-	enum rxe_wr_mask	mask[WR_MAX_QPT];
+	enum rxe_wr_mask	mask[IB_QPT_MAX];
 };
 
 extern struct rxe_wr_opcode_info rxe_wr_opcode_info[];
@@ -44,6 +44,7 @@ enum rxe_hdr_type {
 	RXE_ATMETH,
 	RXE_ATMACK,
 	RXE_IETH,
+	RXE_XRCETH,
 	RXE_RDETH,
 	RXE_DETH,
 	RXE_IMMDT,
@@ -61,6 +62,7 @@ enum rxe_hdr_mask {
 	RXE_ATMETH_MASK		= BIT(RXE_ATMETH),
 	RXE_ATMACK_MASK		= BIT(RXE_ATMACK),
 	RXE_IETH_MASK		= BIT(RXE_IETH),
+	RXE_XRCETH_MASK		= BIT(RXE_XRCETH),
 	RXE_RDETH_MASK		= BIT(RXE_RDETH),
 	RXE_DETH_MASK		= BIT(RXE_DETH),
 	RXE_PAYLOAD_MASK	= BIT(RXE_PAYLOAD),
-- 
2.34.1
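
[Note: the offset tables above drive all header access in the hot path: each
opcode maps a header type to a fixed byte offset, so finding a header is one
table lookup plus pointer arithmetic. Below is a minimal user-space model of
that lookup, illustrative only -- the names merely echo the driver's, and the
byte sizes (BTH 12, XRCETH 4, RETH 16) are the IBA header lengths the table
above encodes; this is not kernel code.]

#include <stdio.h>

enum hdr_type { BTH, XRCETH, RETH, PAYLOAD, NUM_HDR };

struct opcode_info {
	const char *name;
	int offset[NUM_HDR];
};

/* mirrors IB_OPCODE_XRC_RDMA_WRITE_ONLY from the table above */
static const struct opcode_info xrc_write_only = {
	.name	= "XRC_RDMA_WRITE_ONLY",
	.offset	= {
		[BTH]		= 0,
		[XRCETH]	= 12,
		[RETH]		= 12 + 4,
		[PAYLOAD]	= 12 + 4 + 16,
	},
};

int main(void)
{
	unsigned char pkt[64] = {0};
	/* header pointer = packet base + per-opcode offset, the same
	 * pattern the driver's header accessors use
	 */
	unsigned char *reth = pkt + xrc_write_only.offset[RETH];

	printf("%s: RETH begins at byte %td\n", xrc_write_only.name,
	       reth - pkt);
	return 0;
}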


* [PATCH for-next 05/13] RDMA/rxe: Add xrc opcodes to next_opcode()
  2022-09-17  3:10 [PATCH for-next 01/13] RDMA/rxe: Replace START->FIRST, END->LAST Bob Pearson
                   ` (2 preceding siblings ...)
  2022-09-17  3:10 ` [PATCH for-next 04/13] RDMA/rxe: Extend opcodes and headers to support xrc Bob Pearson
@ 2022-09-17  3:10 ` Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 06/13] RDMA/rxe: Implement open_xrcd and close_xrcd Bob Pearson
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 10+ messages in thread
From: Bob Pearson @ 2022-09-17  3:10 UTC (permalink / raw)
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Extend next_opcode() to support xrc operations.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_opcode.c | 88 ++++++++++++++++++++++++++
 1 file changed, 88 insertions(+)

diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index 4ae926a37ef8..c2bac0ce444a 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -1376,6 +1376,91 @@ static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
 	return -EINVAL;
 }
 
+static int next_opcode_xrc(struct rxe_qp *qp, u32 wr_opcode, int fits)
+{
+	switch (wr_opcode) {
+	case IB_WR_RDMA_WRITE:
+		if (qp->req.opcode == IB_OPCODE_XRC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_XRC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_XRC_RDMA_WRITE_LAST :
+				IB_OPCODE_XRC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_XRC_RDMA_WRITE_ONLY :
+				IB_OPCODE_XRC_RDMA_WRITE_FIRST;
+
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_XRC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_XRC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_XRC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_XRC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_XRC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_XRC_RDMA_WRITE_FIRST;
+
+	case IB_WR_SEND:
+		if (qp->req.opcode == IB_OPCODE_XRC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_XRC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_XRC_SEND_LAST :
+				IB_OPCODE_XRC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_XRC_SEND_ONLY :
+				IB_OPCODE_XRC_SEND_FIRST;
+
+	case IB_WR_SEND_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_XRC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_XRC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_XRC_SEND_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_XRC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_XRC_SEND_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_XRC_SEND_FIRST;
+
+	case IB_WR_RDMA_READ:
+		return IB_OPCODE_XRC_RDMA_READ_REQUEST;
+
+	case IB_WR_RDMA_READ_WITH_INV:
+		return IB_OPCODE_XRC_RDMA_READ_REQUEST;
+
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+		return IB_OPCODE_XRC_COMPARE_SWAP;
+
+	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+		return -EOPNOTSUPP;
+
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		return IB_OPCODE_XRC_FETCH_ADD;
+
+	case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+		return -EOPNOTSUPP;
+
+	case IB_WR_SEND_WITH_INV:
+		if (qp->req.opcode == IB_OPCODE_XRC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_XRC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_XRC_SEND_LAST_WITH_INVALIDATE :
+				IB_OPCODE_XRC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_XRC_SEND_ONLY_WITH_INVALIDATE :
+				IB_OPCODE_XRC_SEND_FIRST;
+
+	case IB_WR_LOCAL_INV:
+	case IB_WR_REG_MR:
+	case IB_WR_BIND_MW:
+		return wr_opcode;
+	}
+
+	return -EINVAL;
+}
+
 int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode)
 {
 	int fits = (wqe->dma.resid <= qp->mtu);
@@ -1387,6 +1472,9 @@ int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode)
 	case IB_QPT_UC:
 		return next_opcode_uc(qp, opcode, fits);
 
+	case IB_QPT_XRC_INI:
+		return next_opcode_xrc(qp, opcode, fits);
+
 	case IB_QPT_UD:
 	case IB_QPT_GSI:
 		switch (opcode) {
-- 
2.34.1
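
[Note: the fits argument above is the whole segmentation policy -- it is true
when the remaining payload (wqe->dma.resid) fits in one MTU. A first packet
that fits becomes *_ONLY; otherwise the walk is *_FIRST, zero or more
*_MIDDLE, then *_LAST. A toy user-space model of that walk; walk_send() is a
hypothetical helper, not driver code.]

#include <stdio.h>

static void walk_send(unsigned int resid, unsigned int mtu)
{
	int started = 0;	/* models qp->req.opcode being FIRST/MIDDLE */

	while (1) {
		int fits = resid <= mtu;

		if (!started)
			printf("%s\n", fits ? "SEND_ONLY" : "SEND_FIRST");
		else
			printf("%s\n", fits ? "SEND_LAST" : "SEND_MIDDLE");

		if (fits)
			break;
		started = 1;
		resid -= mtu;
	}
}

int main(void)
{
	walk_send(2500, 1024);	/* prints FIRST, MIDDLE, LAST */
	walk_send(512, 1024);	/* prints ONLY */
	return 0;
}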


* [PATCH for-next 06/13] RDMA/rxe: Implement open_xrcd and close_xrcd
  2022-09-17  3:10 [PATCH for-next 01/13] RDMA/rxe: Replace START->FIRST, END->LAST Bob Pearson
                   ` (3 preceding siblings ...)
  2022-09-17  3:10 ` [PATCH for-next 05/13] RDMA/rxe: Add xrc opcodes to next_opcode() Bob Pearson
@ 2022-09-17  3:10 ` Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 07/13] RDMA/rxe: Extend srq verbs to support xrcd Bob Pearson
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 10+ messages in thread
From: Bob Pearson @ 2022-09-17  3:10 UTC (permalink / raw)
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Add rxe_alloc_xrcd() and rxe_dealloc_xrcd() and add xrcd objects
to the rxe object pools to implement the alloc_xrcd and dealloc_xrcd
device ops (the kernel side of ib_open_xrcd() and ib_close_xrcd()).

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe.c       |  2 ++
 drivers/infiniband/sw/rxe/rxe_param.h |  3 +++
 drivers/infiniband/sw/rxe/rxe_pool.c  |  8 ++++++++
 drivers/infiniband/sw/rxe/rxe_pool.h  |  1 +
 drivers/infiniband/sw/rxe/rxe_verbs.c | 23 +++++++++++++++++++++++
 drivers/infiniband/sw/rxe/rxe_verbs.h | 11 +++++++++++
 6 files changed, 48 insertions(+)

diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 51daac5c4feb..acd22980836e 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -23,6 +23,7 @@ void rxe_dealloc(struct ib_device *ib_dev)
 	rxe_pool_cleanup(&rxe->uc_pool);
 	rxe_pool_cleanup(&rxe->pd_pool);
 	rxe_pool_cleanup(&rxe->ah_pool);
+	rxe_pool_cleanup(&rxe->xrcd_pool);
 	rxe_pool_cleanup(&rxe->srq_pool);
 	rxe_pool_cleanup(&rxe->qp_pool);
 	rxe_pool_cleanup(&rxe->cq_pool);
@@ -120,6 +121,7 @@ static void rxe_init_pools(struct rxe_dev *rxe)
 	rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC);
 	rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD);
 	rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH);
+	rxe_pool_init(rxe, &rxe->xrcd_pool, RXE_TYPE_XRCD);
 	rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ);
 	rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP);
 	rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ);
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 86c7a8bf3cbb..fa4bf177e123 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -86,6 +86,9 @@ enum rxe_device_param {
 	RXE_MAX_QP_INDEX		= DEFAULT_MAX_VALUE,
 	RXE_MAX_QP			= DEFAULT_MAX_VALUE - RXE_MIN_QP_INDEX,
 
+	RXE_MIN_XRCD_INDEX		= 1,
+	RXE_MAX_XRCD_INDEX		= 128,
+	RXE_MAX_XRCD			= 128,
 	RXE_MIN_SRQ_INDEX		= 0x00020001,
 	RXE_MAX_SRQ_INDEX		= DEFAULT_MAX_VALUE,
 	RXE_MAX_SRQ			= DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index f50620f5a0a1..b54453b68169 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -42,6 +42,14 @@ static const struct rxe_type_info {
 		.max_index	= RXE_MAX_AH_INDEX,
 		.max_elem	= RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1,
 	},
+	[RXE_TYPE_XRCD] = {
+		.name		= "xrcd",
+		.size		= sizeof(struct rxe_xrcd),
+		.elem_offset	= offsetof(struct rxe_xrcd, elem),
+		.min_index	= RXE_MIN_XRCD_INDEX,
+		.max_index	= RXE_MAX_XRCD_INDEX,
+		.max_elem	= RXE_MAX_XRCD_INDEX - RXE_MIN_XRCD_INDEX + 1,
+	},
 	[RXE_TYPE_SRQ] = {
 		.name		= "srq",
 		.size		= sizeof(struct rxe_srq),
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 9d83cb32092f..35ac0746a4b8 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -11,6 +11,7 @@ enum rxe_elem_type {
 	RXE_TYPE_UC,
 	RXE_TYPE_PD,
 	RXE_TYPE_AH,
+	RXE_TYPE_XRCD,
 	RXE_TYPE_SRQ,
 	RXE_TYPE_QP,
 	RXE_TYPE_CQ,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 9ebe9decad34..4a5da079bf11 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -281,6 +281,26 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
 	return err;
 }
 
+static int rxe_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+{
+	struct rxe_dev *rxe = to_rdev(ibxrcd->device);
+	struct rxe_xrcd *xrcd = to_rxrcd(ibxrcd);
+	int err;
+
+	err = rxe_add_to_pool(&rxe->xrcd_pool, xrcd);
+
+	return err;
+}
+
+static int rxe_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+{
+	struct rxe_xrcd *xrcd = to_rxrcd(ibxrcd);
+
+	rxe_cleanup(xrcd);
+
+	return 0;
+}
+
 static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 			  struct ib_udata *udata)
 {
@@ -1055,6 +1075,7 @@ static const struct ib_device_ops rxe_dev_ops = {
 	.alloc_mw = rxe_alloc_mw,
 	.alloc_pd = rxe_alloc_pd,
 	.alloc_ucontext = rxe_alloc_ucontext,
+	.alloc_xrcd = rxe_alloc_xrcd,
 	.attach_mcast = rxe_attach_mcast,
 	.create_ah = rxe_create_ah,
 	.create_cq = rxe_create_cq,
@@ -1065,6 +1086,7 @@ static const struct ib_device_ops rxe_dev_ops = {
 	.dealloc_mw = rxe_dealloc_mw,
 	.dealloc_pd = rxe_dealloc_pd,
 	.dealloc_ucontext = rxe_dealloc_ucontext,
+	.dealloc_xrcd = rxe_dealloc_xrcd,
 	.dereg_mr = rxe_dereg_mr,
 	.destroy_ah = rxe_destroy_ah,
 	.destroy_cq = rxe_destroy_cq,
@@ -1103,6 +1125,7 @@ static const struct ib_device_ops rxe_dev_ops = {
 	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
 	INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
+	INIT_RDMA_OBJ_SIZE(ib_xrcd, rxe_xrcd, ibxrcd),
 	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
 	INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index a51819d0c345..6c4cfb802dd4 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -93,6 +93,11 @@ struct rxe_rq {
 	struct rxe_queue	*queue;
 };
 
+struct rxe_xrcd {
+	struct ib_xrcd		ibxrcd;
+	struct rxe_pool_elem	elem;
+};
+
 struct rxe_srq {
 	struct ib_srq		ibsrq;
 	struct rxe_pool_elem	elem;
@@ -383,6 +388,7 @@ struct rxe_dev {
 	struct rxe_pool		uc_pool;
 	struct rxe_pool		pd_pool;
 	struct rxe_pool		ah_pool;
+	struct rxe_pool		xrcd_pool;
 	struct rxe_pool		srq_pool;
 	struct rxe_pool		qp_pool;
 	struct rxe_pool		cq_pool;
@@ -432,6 +438,11 @@ static inline struct rxe_ah *to_rah(struct ib_ah *ah)
 	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
 }
 
+static inline struct rxe_xrcd *to_rxrcd(struct ib_xrcd *ibxrcd)
+{
+	return ibxrcd ? container_of(ibxrcd, struct rxe_xrcd, ibxrcd) : NULL;
+}
+
 static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
 {
 	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
-- 
2.34.1
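
[Note: from user space these ops sit behind the stock libibverbs entry
points, so no new uAPI is needed. A minimal sketch of exercising them is
below; it assumes the first device in the list is the rxe device and elides
error handling.]

#include <fcntl.h>
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **list = ibv_get_device_list(NULL);
	struct ibv_context *ctx = ibv_open_device(list[0]);
	struct ibv_xrcd_init_attr attr = {
		.comp_mask = IBV_XRCD_INIT_ATTR_FD |
			     IBV_XRCD_INIT_ATTR_OFLAGS,
		.fd = -1,		/* -1: xrcd private to this process */
		.oflags = O_CREAT,
	};
	struct ibv_xrcd *xrcd = ibv_open_xrcd(ctx, &attr);

	printf("open_xrcd %s\n", xrcd ? "ok" : "failed");
	if (xrcd)
		ibv_close_xrcd(xrcd);
	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}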


* [PATCH for-next 07/13] RDMA/rxe: Extend srq verbs to support xrcd
  2022-09-17  3:10 [PATCH for-next 01/13] RDMA/rxe: Replace START->FIRST, END->LAST Bob Pearson
                   ` (4 preceding siblings ...)
  2022-09-17  3:10 ` [PATCH for-next 06/13] RDMA/rxe: Implement open_xrcd and close_xrcd Bob Pearson
@ 2022-09-17  3:10 ` Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 08/13] RDMA/rxe: Extend rxe_qp.c to support xrc qps Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 09/13] RDMA/rxe: Extend rxe_recv.c to support xrc Bob Pearson
  7 siblings, 0 replies; 10+ messages in thread
From: Bob Pearson @ 2022-09-17  3:10 UTC (permalink / raw)
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Extend the srq verbs to support xrcd in the create verb.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_srq.c   | 131 ++++++++++++++------------
 drivers/infiniband/sw/rxe/rxe_verbs.c |  13 +--
 drivers/infiniband/sw/rxe/rxe_verbs.h |   8 +-
 include/uapi/rdma/rdma_user_rxe.h     |   4 +-
 4 files changed, 83 insertions(+), 73 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 02b39498c370..fcd1a58c3900 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -11,61 +11,85 @@
 int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
 {
 	struct ib_srq_attr *attr = &init->attr;
+	int err = -EINVAL;
 
-	if (attr->max_wr > rxe->attr.max_srq_wr) {
-		pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
-			attr->max_wr, rxe->attr.max_srq_wr);
-		goto err1;
+	if (init->srq_type == IB_SRQT_TM) {
+		err = -EOPNOTSUPP;
+		goto err_out;
 	}
 
-	if (attr->max_wr <= 0) {
-		pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
-		goto err1;
+	if (init->srq_type == IB_SRQT_XRC) {
+		if (!init->ext.cq || !init->ext.xrc.xrcd)
+			goto err_out;
 	}
 
+	if (attr->max_wr > rxe->attr.max_srq_wr)
+		goto err_out;
+
+	if (attr->max_wr <= 0)
+		goto err_out;
+
 	if (attr->max_wr < RXE_MIN_SRQ_WR)
 		attr->max_wr = RXE_MIN_SRQ_WR;
 
-	if (attr->max_sge > rxe->attr.max_srq_sge) {
-		pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
-			attr->max_sge, rxe->attr.max_srq_sge);
-		goto err1;
-	}
+	if (attr->max_sge > rxe->attr.max_srq_sge)
+		goto err_out;
 
 	if (attr->max_sge < RXE_MIN_SRQ_SGE)
 		attr->max_sge = RXE_MIN_SRQ_SGE;
 
 	return 0;
 
-err1:
-	return -EINVAL;
+err_out:
+	pr_debug("%s: failed err = %d\n", __func__, err);
+	return err;
 }
 
 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_init_attr *init, struct ib_udata *udata,
 		      struct rxe_create_srq_resp __user *uresp)
 {
-	int err;
-	int srq_wqe_size;
+	struct rxe_pd *pd = to_rpd(srq->ibsrq.pd);
+	struct rxe_cq *cq;
+	struct rxe_xrcd *xrcd;
 	struct rxe_queue *q;
-	enum queue_type type;
+	int srq_wqe_size;
+	int err;
+
+	rxe_get(pd);
+	srq->pd = pd;
 
 	srq->ibsrq.event_handler	= init->event_handler;
 	srq->ibsrq.srq_context		= init->srq_context;
 	srq->limit		= init->attr.srq_limit;
-	srq->srq_num		= srq->elem.index;
 	srq->rq.max_wr		= init->attr.max_wr;
 	srq->rq.max_sge		= init->attr.max_sge;
 
-	srq_wqe_size		= rcv_wqe_size(srq->rq.max_sge);
+	if (init->srq_type == IB_SRQT_XRC) {
+		cq = to_rcq(init->ext.cq);
+		if (!cq)
+			return -EINVAL;
+		rxe_get(cq);
+		srq->cq = cq;
+
+		xrcd = to_rxrcd(init->ext.xrc.xrcd);
+		if (xrcd) {
+			rxe_get(xrcd);
+			srq->xrcd = xrcd;
+		}
+
+		srq->ibsrq.ext.xrc.srq_num = srq->elem.index;
+	}
 
 	spin_lock_init(&srq->rq.producer_lock);
 	spin_lock_init(&srq->rq.consumer_lock);
 
-	type = QUEUE_TYPE_FROM_CLIENT;
-	q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type);
+	srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
+	q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size,
+			   QUEUE_TYPE_FROM_CLIENT);
 	if (!q) {
-		pr_warn("unable to allocate queue for srq\n");
+		pr_debug("%s: srq#%d: unable to allocate queue\n",
+				__func__, srq->elem.index);
 		return -ENOMEM;
 	}
 
@@ -79,66 +103,45 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 		return err;
 	}
 
-	if (uresp) {
-		if (copy_to_user(&uresp->srq_num, &srq->srq_num,
-				 sizeof(uresp->srq_num))) {
-			rxe_queue_cleanup(q);
-			return -EFAULT;
-		}
-	}
-
 	return 0;
 }
 
 int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
 {
-	if (srq->error) {
-		pr_warn("srq in error state\n");
-		goto err1;
-	}
+	int err = -EINVAL;
+
+	if (srq->error)
+		goto err_out;
 
 	if (mask & IB_SRQ_MAX_WR) {
-		if (attr->max_wr > rxe->attr.max_srq_wr) {
-			pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
-				attr->max_wr, rxe->attr.max_srq_wr);
-			goto err1;
-		}
+		if (attr->max_wr > rxe->attr.max_srq_wr)
+			goto err_out;
 
-		if (attr->max_wr <= 0) {
-			pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
-			goto err1;
-		}
+		if (attr->max_wr <= 0)
+			goto err_out;
 
-		if (srq->limit && (attr->max_wr < srq->limit)) {
-			pr_warn("max_wr (%d) < srq->limit (%d)\n",
-				attr->max_wr, srq->limit);
-			goto err1;
-		}
+		if (srq->limit && (attr->max_wr < srq->limit))
+			goto err_out;
 
 		if (attr->max_wr < RXE_MIN_SRQ_WR)
 			attr->max_wr = RXE_MIN_SRQ_WR;
 	}
 
 	if (mask & IB_SRQ_LIMIT) {
-		if (attr->srq_limit > rxe->attr.max_srq_wr) {
-			pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
-				attr->srq_limit, rxe->attr.max_srq_wr);
-			goto err1;
-		}
+		if (attr->srq_limit > rxe->attr.max_srq_wr)
+			goto err_out;
 
-		if (attr->srq_limit > srq->rq.queue->buf->index_mask) {
-			pr_warn("srq_limit (%d) > cur limit(%d)\n",
-				attr->srq_limit,
-				srq->rq.queue->buf->index_mask);
-			goto err1;
-		}
+		if (attr->srq_limit > srq->rq.queue->buf->index_mask)
+			goto err_out;
 	}
 
 	return 0;
 
-err1:
-	return -EINVAL;
+err_out:
+	pr_debug("%s: srq#%d: failed err = %d\n", __func__,
+			srq->elem.index, err);
+	return err;
 }
 
 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
@@ -182,6 +185,12 @@ void rxe_srq_cleanup(struct rxe_pool_elem *elem)
 	if (srq->pd)
 		rxe_put(srq->pd);
 
+	if (srq->cq)
+		rxe_put(srq->cq);
+
+	if (srq->xrcd)
+		rxe_put(srq->xrcd);
+
 	if (srq->rq.queue)
 		rxe_queue_cleanup(srq->rq.queue);
 }
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 4a5da079bf11..ef86f0c5890e 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -306,7 +306,6 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 {
 	int err;
 	struct rxe_dev *rxe = to_rdev(ibsrq->device);
-	struct rxe_pd *pd = to_rpd(ibsrq->pd);
 	struct rxe_srq *srq = to_rsrq(ibsrq);
 	struct rxe_create_srq_resp __user *uresp = NULL;
 
@@ -316,9 +315,6 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 		uresp = udata->outbuf;
 	}
 
-	if (init->srq_type != IB_SRQT_BASIC)
-		return -EOPNOTSUPP;
-
 	err = rxe_srq_chk_init(rxe, init);
 	if (err)
 		return err;
@@ -327,13 +323,11 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 	if (err)
 		return err;
 
-	rxe_get(pd);
-	srq->pd = pd;
-
 	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
 	if (err)
 		goto err_cleanup;
 
+	rxe_finalize(srq);
 	return 0;
 
 err_cleanup:
@@ -367,6 +361,7 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
 	if (err)
 		return err;
+
 	return 0;
 }
 
@@ -380,6 +375,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
 	attr->max_wr = srq->rq.queue->buf->index_mask;
 	attr->max_sge = srq->rq.max_sge;
 	attr->srq_limit = srq->limit;
+
 	return 0;
 }
 
@@ -546,7 +542,6 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 			 const struct ib_send_wr *ibwr)
 {
 	wr->wr_id = ibwr->wr_id;
-	wr->num_sge = ibwr->num_sge;
 	wr->opcode = ibwr->opcode;
 	wr->send_flags = ibwr->send_flags;
 
@@ -628,6 +623,8 @@ static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 		return;
 	}
 
+	wqe->dma.num_sge = ibwr->num_sge;
+
 	if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
 		copy_inline_data_to_wqe(wqe, ibwr);
 	else
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 6c4cfb802dd4..7dab7fa3ba6c 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -102,13 +102,19 @@ struct rxe_srq {
 	struct ib_srq		ibsrq;
 	struct rxe_pool_elem	elem;
 	struct rxe_pd		*pd;
+	struct rxe_xrcd		*xrcd;		/* xrc only */
+	struct rxe_cq		*cq;		/* xrc only */
 	struct rxe_rq		rq;
-	u32			srq_num;
 
 	int			limit;
 	int			error;
 };
 
+static inline u32 srq_num(struct rxe_srq *srq)
+{
+	return srq->ibsrq.ext.xrc.srq_num;
+}
+
 enum rxe_qp_state {
 	QP_STATE_RESET,
 	QP_STATE_INIT,
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index f09c5c9e3dd5..514a1b6976fe 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -74,7 +74,7 @@ struct rxe_av {
 
 struct rxe_send_wr {
 	__aligned_u64		wr_id;
-	__u32			num_sge;
+	__u32			srq_num;	/* xrc only */
 	__u32			opcode;
 	__u32			send_flags;
 	union {
@@ -191,8 +191,6 @@ struct rxe_create_qp_resp {
 
 struct rxe_create_srq_resp {
 	struct mminfo mi;
-	__u32 srq_num;
-	__u32 reserved;
 };
 
 struct rxe_modify_srq_cmd {
-- 
2.34.1
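
[Note: the user-visible consequence of dropping srq_num from the response
struct is that consumers obtain the number via ibv_get_srq_num(), which
returns the ibsrq.ext.xrc.srq_num value set above. A hedged sketch of
creating an XRC srq against this driver follows; make_xrc_srq() is a
hypothetical wrapper and assumes ctx, pd, cq and xrcd already exist, e.g.
from the previous example.]

#include <stdio.h>
#include <stdint.h>
#include <infiniband/verbs.h>

struct ibv_srq *make_xrc_srq(struct ibv_context *ctx, struct ibv_pd *pd,
			     struct ibv_cq *cq, struct ibv_xrcd *xrcd)
{
	struct ibv_srq_init_attr_ex attr = {
		.attr		= { .max_wr = 64, .max_sge = 1 },
		.comp_mask	= IBV_SRQ_INIT_ATTR_TYPE |
				  IBV_SRQ_INIT_ATTR_PD |
				  IBV_SRQ_INIT_ATTR_XRCD |
				  IBV_SRQ_INIT_ATTR_CQ,
		.srq_type	= IBV_SRQT_XRC,
		.pd		= pd,
		.xrcd		= xrcd,
		.cq		= cq,
	};
	struct ibv_srq *srq = ibv_create_srq_ex(ctx, &attr);
	uint32_t srqn;

	if (srq && !ibv_get_srq_num(srq, &srqn))
		printf("xrc srq number %u\n", srqn);
	return srq;
}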


* [PATCH for-next 08/13] RDMA/rxe: Extend rxe_qp.c to support xrc qps
  2022-09-17  3:10 [PATCH for-next 01/13] RDMA/rxe: Replace START->FIRST, END->LAST Bob Pearson
                   ` (5 preceding siblings ...)
  2022-09-17  3:10 ` [PATCH for-next 07/13] RDMA/rxe: Extend srq verbs to support xrcd Bob Pearson
@ 2022-09-17  3:10 ` Bob Pearson
  2022-09-17  3:10 ` [PATCH for-next 09/13] RDMA/rxe: Extend rxe_recv.c to support xrc Bob Pearson
  7 siblings, 0 replies; 10+ messages in thread
From: Bob Pearson @ 2022-09-17  3:10 UTC (permalink / raw)
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Extend code in rxe_qp.c to support xrc qp types.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_av.c    |   3 +-
 drivers/infiniband/sw/rxe/rxe_loc.h   |   7 +-
 drivers/infiniband/sw/rxe/rxe_qp.c    | 307 +++++++++++++++-----------
 drivers/infiniband/sw/rxe/rxe_verbs.c |  22 +-
 drivers/infiniband/sw/rxe/rxe_verbs.h |   1 +
 5 files changed, 200 insertions(+), 140 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 3b05314ca739..c8f3ec53aa79 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -110,7 +110,8 @@ struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp)
 	if (!pkt || !pkt->qp)
 		return NULL;
 
-	if (qp_type(pkt->qp) == IB_QPT_RC || qp_type(pkt->qp) == IB_QPT_UC)
+	if (qp_type(pkt->qp) == IB_QPT_RC || qp_type(pkt->qp) == IB_QPT_UC ||
+	    qp_type(pkt->qp) == IB_QPT_XRC_INI)
 		return &pkt->qp->pri_av;
 
 	if (!pkt->wqe)
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 5526d83697c7..c6fb93a749ad 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -103,11 +103,12 @@ const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
 int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode);
 
 /* rxe_qp.c */
-int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
-int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
+int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp *ibqp,
+		    struct ib_qp_init_attr *init);
+int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp,
 		     struct ib_qp_init_attr *init,
 		     struct rxe_create_qp_resp __user *uresp,
-		     struct ib_pd *ibpd, struct ib_udata *udata);
+		     struct ib_udata *udata);
 int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
 int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
 		    struct ib_qp_attr *attr, int mask);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 1dcbeacb3122..6cbc842b8cbb 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -56,30 +56,42 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
 	return -EINVAL;
 }
 
-int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
+int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp *ibqp,
+		    struct ib_qp_init_attr *init)
 {
+	struct ib_pd *ibpd = ibqp->pd;
 	struct ib_qp_cap *cap = &init->cap;
 	struct rxe_port *port;
 	int port_num = init->port_num;
 
+	if (init->create_flags)
+		return -EOPNOTSUPP;
+
 	switch (init->qp_type) {
 	case IB_QPT_GSI:
 	case IB_QPT_RC:
 	case IB_QPT_UC:
 	case IB_QPT_UD:
+		if (!ibpd || !init->recv_cq || !init->send_cq)
+			return -EINVAL;
+		break;
+	case IB_QPT_XRC_INI:
+		if (!init->send_cq)
+			return -EINVAL;
+		break;
+	case IB_QPT_XRC_TGT:
+		if (!init->xrcd)
+			return -EINVAL;
 		break;
 	default:
 		return -EOPNOTSUPP;
 	}
 
-	if (!init->recv_cq || !init->send_cq) {
-		pr_warn("missing cq\n");
-		goto err1;
+	if (init->qp_type != IB_QPT_XRC_TGT) {
+		if (rxe_qp_chk_cap(rxe, cap, !!(init->srq || init->xrcd)))
+			goto err1;
 	}
 
-	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
-		goto err1;
-
 	if (init->qp_type == IB_QPT_GSI) {
 		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
 			pr_warn("invalid port = %d\n", port_num);
@@ -148,49 +160,83 @@ static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
 static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
 			     struct ib_qp_init_attr *init)
 {
-	struct rxe_port *port;
-	u32 qpn;
-
+	qp->ibqp.qp_type	= init->qp_type;
 	qp->sq_sig_type		= init->sq_sig_type;
 	qp->attr.path_mtu	= 1;
 	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);
 
-	qpn			= qp->elem.index;
-	port			= &rxe->port;
-
 	switch (init->qp_type) {
 	case IB_QPT_GSI:
 		qp->ibqp.qp_num		= 1;
-		port->qp_gsi_index	= qpn;
+		rxe->port.qp_gsi_index	= qp->elem.index;
 		qp->attr.port_num	= init->port_num;
 		break;
 
 	default:
-		qp->ibqp.qp_num		= qpn;
+		qp->ibqp.qp_num		= qp->elem.index;
 		break;
 	}
 
 	spin_lock_init(&qp->state_lock);
 
-	spin_lock_init(&qp->req.task.state_lock);
-	spin_lock_init(&qp->resp.task.state_lock);
-	spin_lock_init(&qp->comp.task.state_lock);
-
-	spin_lock_init(&qp->sq.sq_lock);
-	spin_lock_init(&qp->rq.producer_lock);
-	spin_lock_init(&qp->rq.consumer_lock);
-
 	atomic_set(&qp->ssn, 0);
 	atomic_set(&qp->skb_out, 0);
 }
 
+static int rxe_prepare_send_queue(struct rxe_dev *rxe, struct rxe_qp *qp,
+			struct ib_qp_init_attr *init, struct ib_udata *udata,
+			struct rxe_create_qp_resp __user *uresp)
+{
+	struct rxe_queue *q;
+	int wqe_size;
+	int err;
+
+	qp->sq.max_wr = init->cap.max_send_wr;
+
+	wqe_size = init->cap.max_send_sge * sizeof(struct ib_sge);
+	wqe_size = max_t(int, wqe_size, init->cap.max_inline_data);
+
+	qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
+	qp->sq.max_inline = wqe_size;
+	wqe_size += sizeof(struct rxe_send_wqe);
+
+	q = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
+			   QUEUE_TYPE_FROM_CLIENT);
+	if (!q)
+		return -ENOMEM;
+
+	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
+			   q->buf, q->buf_size, &q->ip);
+
+	if (err) {
+		vfree(q->buf);
+		kfree(q);
+		return err;
+	}
+
+	init->cap.max_send_sge = qp->sq.max_sge;
+	init->cap.max_inline_data = qp->sq.max_inline;
+
+	qp->sq.queue = q;
+
+	return 0;
+}
+
 static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 			   struct ib_qp_init_attr *init, struct ib_udata *udata,
 			   struct rxe_create_qp_resp __user *uresp)
 {
 	int err;
-	int wqe_size;
-	enum queue_type type;
+
+	err = rxe_prepare_send_queue(rxe, qp, init, udata, uresp);
+	if (err)
+		return err;
+
+	spin_lock_init(&qp->sq.sq_lock);
+	spin_lock_init(&qp->req.task.state_lock);
+	spin_lock_init(&qp->comp.task.state_lock);
+
+	skb_queue_head_init(&qp->resp_pkts);
 
 	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
 	if (err < 0)
@@ -205,32 +251,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	 * (0xc000 - 0xffff).
 	 */
 	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
-	qp->sq.max_wr		= init->cap.max_send_wr;
-
-	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
-	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
-			 init->cap.max_inline_data);
-	qp->sq.max_sge = init->cap.max_send_sge =
-		wqe_size / sizeof(struct ib_sge);
-	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
-	wqe_size += sizeof(struct rxe_send_wqe);
-
-	type = QUEUE_TYPE_FROM_CLIENT;
-	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
-				wqe_size, type);
-	if (!qp->sq.queue)
-		return -ENOMEM;
-
-	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
-			   qp->sq.queue->buf, qp->sq.queue->buf_size,
-			   &qp->sq.queue->ip);
-
-	if (err) {
-		vfree(qp->sq.queue->buf);
-		kfree(qp->sq.queue);
-		qp->sq.queue = NULL;
-		return err;
-	}
 
 	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
 					       QUEUE_TYPE_FROM_CLIENT);
@@ -240,57 +260,71 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	qp->req.opcode		= -1;
 	qp->comp.opcode		= -1;
 
-	skb_queue_head_init(&qp->req_pkts);
-
 	rxe_init_task(&qp->req.task, qp,
 		      rxe_requester, "req");
 	rxe_init_task(&qp->comp.task, qp,
 		      rxe_completer, "comp");
 
 	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
-	if (init->qp_type == IB_QPT_RC) {
+	if (init->qp_type == IB_QPT_RC || init->qp_type == IB_QPT_XRC_INI) {
 		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
 		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
 	}
 	return 0;
 }
 
+static int rxe_prepare_recv_queue(struct rxe_dev *rxe, struct rxe_qp *qp,
+			struct ib_qp_init_attr *init, struct ib_udata *udata,
+			struct rxe_create_qp_resp __user *uresp)
+{
+	struct rxe_queue *q;
+	int wqe_size;
+	int err;
+
+	qp->rq.max_wr = init->cap.max_recv_wr;
+	qp->rq.max_sge = init->cap.max_recv_sge;
+
+	wqe_size = sizeof(struct rxe_recv_wqe) +
+		   qp->rq.max_sge * sizeof(struct ib_sge);
+
+	q = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
+			   QUEUE_TYPE_FROM_CLIENT);
+	if (!q)
+		return -ENOMEM;
+
+	err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
+			   q->buf, q->buf_size, &q->ip);
+
+	if (err) {
+		vfree(q->buf);
+		kfree(q);
+		return err;
+	}
+
+	qp->rq.queue = q;
+
+	return 0;
+}
+
 static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 			    struct ib_qp_init_attr *init,
 			    struct ib_udata *udata,
 			    struct rxe_create_qp_resp __user *uresp)
 {
 	int err;
-	int wqe_size;
-	enum queue_type type;
 
-	if (!qp->srq) {
-		qp->rq.max_wr		= init->cap.max_recv_wr;
-		qp->rq.max_sge		= init->cap.max_recv_sge;
-
-		wqe_size = rcv_wqe_size(qp->rq.max_sge);
-
-		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
-			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
-
-		type = QUEUE_TYPE_FROM_CLIENT;
-		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
-					wqe_size, type);
-		if (!qp->rq.queue)
-			return -ENOMEM;
-
-		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
-				   qp->rq.queue->buf, qp->rq.queue->buf_size,
-				   &qp->rq.queue->ip);
-		if (err) {
-			vfree(qp->rq.queue->buf);
-			kfree(qp->rq.queue);
-			qp->rq.queue = NULL;
+	if (!qp->srq && qp_type(qp) != IB_QPT_XRC_TGT) {
+		err = rxe_prepare_recv_queue(rxe, qp, init, udata, uresp);
+		if (err)
 			return err;
-		}
+
+		spin_lock_init(&qp->rq.producer_lock);
+		spin_lock_init(&qp->rq.consumer_lock);
 	}
 
-	skb_queue_head_init(&qp->resp_pkts);
+	spin_lock_init(&qp->resp.task.state_lock);
+
+	skb_queue_head_init(&qp->req_pkts);
 
 	rxe_init_task(&qp->resp.task, qp,
 		      rxe_responder, "resp");
@@ -303,64 +337,82 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 }
 
 /* called by the create qp verb */
-int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
+int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp,
 		     struct ib_qp_init_attr *init,
 		     struct rxe_create_qp_resp __user *uresp,
-		     struct ib_pd *ibpd,
 		     struct ib_udata *udata)
 {
 	int err;
+	struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
 	struct rxe_cq *rcq = to_rcq(init->recv_cq);
 	struct rxe_cq *scq = to_rcq(init->send_cq);
-	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
+	struct rxe_srq *srq = to_rsrq(init->srq);
+	struct rxe_xrcd *xrcd = to_rxrcd(init->xrcd);
 
-	rxe_get(pd);
-	rxe_get(rcq);
-	rxe_get(scq);
-	if (srq)
+	if (pd) {
+		rxe_get(pd);
+		qp->pd = pd;
+	}
+	if (rcq) {
+		rxe_get(rcq);
+		qp->rcq = rcq;
+		atomic_inc(&rcq->num_wq);
+	}
+	if (scq) {
+		rxe_get(scq);
+		qp->scq = scq;
+		atomic_inc(&scq->num_wq);
+	}
+	if (srq) {
 		rxe_get(srq);
-
-	qp->pd			= pd;
-	qp->rcq			= rcq;
-	qp->scq			= scq;
-	qp->srq			= srq;
-
-	atomic_inc(&rcq->num_wq);
-	atomic_inc(&scq->num_wq);
+		qp->srq = srq;
+	}
+	if (xrcd) {
+		rxe_get(xrcd);
+		qp->xrcd = xrcd;
+	}
 
 	rxe_qp_init_misc(rxe, qp, init);
 
-	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
-	if (err)
-		goto err1;
+	switch (init->qp_type) {
+	case IB_QPT_RC:
+	case IB_QPT_UC:
+	case IB_QPT_GSI:
+	case IB_QPT_UD:
+		err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
+		if (err)
+			goto err_out;
 
-	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
-	if (err)
-		goto err2;
+		err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
+		if (err)
+			goto err_unwind;
+		break;
+	case IB_QPT_XRC_INI:
+		err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
+		if (err)
+			goto err_out;
+		break;
+	case IB_QPT_XRC_TGT:
+		err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
+		if (err)
+			goto err_out;
+		break;
+	default:
+		/* not reached */
+		err = -EOPNOTSUPP;
+		goto err_out;
+	}
 
 	qp->attr.qp_state = IB_QPS_RESET;
 	qp->valid = 1;
 
 	return 0;
 
-err2:
+err_unwind:
 	rxe_queue_cleanup(qp->sq.queue);
 	qp->sq.queue = NULL;
-err1:
-	atomic_dec(&rcq->num_wq);
-	atomic_dec(&scq->num_wq);
-
-	qp->pd = NULL;
-	qp->rcq = NULL;
-	qp->scq = NULL;
-	qp->srq = NULL;
-
-	if (srq)
-		rxe_put(srq);
-	rxe_put(scq);
-	rxe_put(rcq);
-	rxe_put(pd);
-
+err_out:
+	/* rxe_qp_cleanup handles the rest */
 	return err;
 }
 
@@ -486,7 +538,8 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 
 	/* stop request/comp */
 	if (qp->sq.queue) {
-		if (qp_type(qp) == IB_QPT_RC)
+		if (qp_type(qp) == IB_QPT_RC ||
+		    qp_type(qp) == IB_QPT_XRC_INI)
 			rxe_disable_task(&qp->comp.task);
 		rxe_disable_task(&qp->req.task);
 	}
@@ -530,7 +583,8 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 	rxe_enable_task(&qp->resp.task);
 
 	if (qp->sq.queue) {
-		if (qp_type(qp) == IB_QPT_RC)
+		if (qp_type(qp) == IB_QPT_RC ||
+		    qp_type(qp) == IB_QPT_XRC_INI)
 			rxe_enable_task(&qp->comp.task);
 
 		rxe_enable_task(&qp->req.task);
@@ -543,7 +597,8 @@ static void rxe_qp_drain(struct rxe_qp *qp)
 	if (qp->sq.queue) {
 		if (qp->req.state != QP_STATE_DRAINED) {
 			qp->req.state = QP_STATE_DRAIN;
-			if (qp_type(qp) == IB_QPT_RC)
+			if (qp_type(qp) == IB_QPT_RC ||
+			    qp_type(qp) == IB_QPT_XRC_INI)
 				rxe_run_task(&qp->comp.task, 1);
 			else
 				__rxe_do_task(&qp->comp.task);
@@ -563,7 +618,7 @@ void rxe_qp_error(struct rxe_qp *qp)
 	/* drain work and packet queues */
 	rxe_run_task(&qp->resp.task, 1);
 
-	if (qp_type(qp) == IB_QPT_RC)
+	if (qp_type(qp) == IB_QPT_RC || qp_type(qp) == IB_QPT_XRC_INI)
 		rxe_run_task(&qp->comp.task, 1);
 	else
 		__rxe_do_task(&qp->comp.task);
@@ -673,7 +728,8 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
 		qp->req.psn = qp->attr.sq_psn;
 		qp->comp.psn = qp->attr.sq_psn;
-		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
+		pr_debug("qp#%d set req psn = %d comp psn = %d\n", qp_num(qp),
+				qp->req.psn, qp->comp.psn);
 	}
 
 	if (mask & IB_QP_PATH_MIG_STATE)
@@ -788,7 +844,7 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
 	qp->qp_timeout_jiffies = 0;
 	rxe_cleanup_task(&qp->resp.task);
 
-	if (qp_type(qp) == IB_QPT_RC) {
+	if (qp_type(qp) == IB_QPT_RC || qp_type(qp) == IB_QPT_XRC_INI) {
 		del_timer_sync(&qp->retrans_timer);
 		del_timer_sync(&qp->rnr_nak_timer);
 	}
@@ -808,6 +864,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
 	if (qp->sq.queue)
 		rxe_queue_cleanup(qp->sq.queue);
 
+	if (qp->xrcd)
+		rxe_put(qp->xrcd);
+
 	if (qp->srq)
 		rxe_put(qp->srq);
 
@@ -830,7 +889,7 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
 	if (qp->resp.mr)
 		rxe_put(qp->resp.mr);
 
-	if (qp_type(qp) == IB_QPT_RC)
+	if (qp_type(qp) == IB_QPT_RC || qp_type(qp) == IB_QPT_XRC_INI)
 		sk_dst_reset(qp->sk->sk);
 
 	free_rd_atomic_resources(qp);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index ef86f0c5890e..59ba11e52bac 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -416,7 +416,6 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
 {
 	int err;
 	struct rxe_dev *rxe = to_rdev(ibqp->device);
-	struct rxe_pd *pd = to_rpd(ibqp->pd);
 	struct rxe_qp *qp = to_rqp(ibqp);
 	struct rxe_create_qp_resp __user *uresp = NULL;
 
@@ -424,16 +423,7 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
 		if (udata->outlen < sizeof(*uresp))
 			return -EINVAL;
 		uresp = udata->outbuf;
-	}
-
-	if (init->create_flags)
-		return -EOPNOTSUPP;
 
-	err = rxe_qp_chk_init(rxe, init);
-	if (err)
-		return err;
-
-	if (udata) {
 		if (udata->inlen)
 			return -EINVAL;
 
@@ -442,11 +432,15 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
 		qp->is_user = false;
 	}
 
+	err = rxe_qp_chk_init(rxe, ibqp, init);
+	if (err)
+		return err;
+
 	err = rxe_add_to_pool(&rxe->qp_pool, qp);
 	if (err)
 		return err;
 
-	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
+	err = rxe_qp_from_init(rxe, qp, init, uresp, udata);
 	if (err)
 		goto qp_init;
 
@@ -517,6 +511,9 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	int num_sge = ibwr->num_sge;
 	struct rxe_sq *sq = &qp->sq;
 
+	if (unlikely(qp_type(qp) == IB_QPT_XRC_TGT))
+		return -EOPNOTSUPP;
+
 	if (unlikely(num_sge > sq->max_sge))
 		goto err1;
 
@@ -740,8 +737,9 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		/* Utilize process context to do protocol processing */
 		rxe_run_task(&qp->req.task, 0);
 		return 0;
-	} else
+	} else {
 		return rxe_post_send_kernel(qp, wr, bad_wr);
+	}
 }
 
 static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 7dab7fa3ba6c..ee482a0569b8 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -230,6 +230,7 @@ struct rxe_qp {
 	struct rxe_srq		*srq;
 	struct rxe_cq		*scq;
 	struct rxe_cq		*rcq;
+	struct rxe_xrcd		*xrcd;
 
 	enum ib_sig_type	sq_sig_type;
 
-- 
2.34.1
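
[Note: the split in rxe_qp_from_init() mirrors what user space asks for --
an XRC initiator qp has only a send side, and an XRC target qp has only a
receive side anchored to an xrcd. A sketch of the matching libibverbs calls
is below; make_xrc_ini() and make_xrc_tgt() are hypothetical wrappers and
the cap values are arbitrary.]

#include <infiniband/verbs.h>

/* initiator side: needs a pd and a send cq, no recv side at all */
struct ibv_qp *make_xrc_ini(struct ibv_context *ctx, struct ibv_pd *pd,
			    struct ibv_cq *scq)
{
	struct ibv_qp_init_attr_ex attr = {
		.send_cq	= scq,
		.cap		= { .max_send_wr = 32, .max_send_sge = 1 },
		.qp_type	= IBV_QPT_XRC_SEND,
		.comp_mask	= IBV_QP_INIT_ATTR_PD,
		.pd		= pd,
	};
	return ibv_create_qp_ex(ctx, &attr);
}

/* target side: anchored to the xrcd instead of a pd */
struct ibv_qp *make_xrc_tgt(struct ibv_context *ctx, struct ibv_xrcd *xrcd)
{
	struct ibv_qp_init_attr_ex attr = {
		.qp_type	= IBV_QPT_XRC_RECV,
		.comp_mask	= IBV_QP_INIT_ATTR_XRCD,
		.xrcd		= xrcd,
	};
	return ibv_create_qp_ex(ctx, &attr);
}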


* [PATCH for-next 09/13] RDMA/rxe: Extend rxe_recv.c to support xrc
  2022-09-17  3:10 [PATCH for-next 01/13] RDMA/rxe: Replace START->FIRST, END->LAST Bob Pearson
                   ` (6 preceding siblings ...)
  2022-09-17  3:10 ` [PATCH for-next 08/13] RDMA/rxe: Extend rxe_qp.c to support xrc qps Bob Pearson
@ 2022-09-17  3:10 ` Bob Pearson
  7 siblings, 0 replies; 10+ messages in thread
From: Bob Pearson @ 2022-09-17  3:10 UTC (permalink / raw)
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Extend rxe_recv.c to support xrc packets. Add checks for the qp type
and, for XRC target qps, verify that the srq named in the XRCETH
belongs to the qp's xrcd (qp->xrcd matches srq->xrcd).

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_hdr.h  |  5 +-
 drivers/infiniband/sw/rxe/rxe_recv.c | 79 +++++++++++++++++++++-------
 2 files changed, 63 insertions(+), 21 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
index e947bcf75209..fb9959d91b8d 100644
--- a/drivers/infiniband/sw/rxe/rxe_hdr.h
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -14,7 +14,10 @@
 struct rxe_pkt_info {
 	struct rxe_dev		*rxe;		/* device that owns packet */
 	struct rxe_qp		*qp;		/* qp that owns packet */
-	struct rxe_send_wqe	*wqe;		/* send wqe */
+	union {
+		struct rxe_send_wqe	*wqe;	/* send wqe */
+		struct rxe_srq		*srq;	/* srq for recvd xrc packets */
+	};
 	u8			*hdr;		/* points to bth */
 	u32			mask;		/* useful info about pkt */
 	u32			psn;		/* bth psn of packet */
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index f3ad7b6dbd97..4f35757d3c52 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -13,49 +13,51 @@
 static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 			    struct rxe_qp *qp)
 {
-	unsigned int pkt_type;
+	unsigned int pkt_type = pkt->opcode & IB_OPCODE_TYPE;
 
 	if (unlikely(!qp->valid))
-		goto err1;
+		goto err_out;
 
-	pkt_type = pkt->opcode & 0xe0;
 
 	switch (qp_type(qp)) {
 	case IB_QPT_RC:
-		if (unlikely(pkt_type != IB_OPCODE_RC)) {
-			pr_warn_ratelimited("bad qp type\n");
-			goto err1;
-		}
+		if (unlikely(pkt_type != IB_OPCODE_RC))
+			goto err_out;
 		break;
 	case IB_QPT_UC:
-		if (unlikely(pkt_type != IB_OPCODE_UC)) {
-			pr_warn_ratelimited("bad qp type\n");
-			goto err1;
-		}
+		if (unlikely(pkt_type != IB_OPCODE_UC))
+			goto err_out;
 		break;
 	case IB_QPT_UD:
 	case IB_QPT_GSI:
-		if (unlikely(pkt_type != IB_OPCODE_UD)) {
-			pr_warn_ratelimited("bad qp type\n");
-			goto err1;
-		}
+		if (unlikely(pkt_type != IB_OPCODE_UD))
+			goto err_out;
+		break;
+	case IB_QPT_XRC_INI:
+		if (unlikely(pkt_type != IB_OPCODE_XRC))
+			goto err_out;
+		break;
+	case IB_QPT_XRC_TGT:
+		if (unlikely(pkt_type != IB_OPCODE_XRC))
+			goto err_out;
 		break;
 	default:
-		pr_warn_ratelimited("unsupported qp type\n");
-		goto err1;
+		goto err_out;
 	}
 
 	if (pkt->mask & RXE_REQ_MASK) {
 		if (unlikely(qp->resp.state != QP_STATE_READY))
-			goto err1;
+			goto err_out;
 	} else if (unlikely(qp->req.state < QP_STATE_READY ||
 				qp->req.state > QP_STATE_DRAINED)) {
-		goto err1;
+		goto err_out;
 	}
 
 	return 0;
 
-err1:
+err_out:
+	pr_debug("%s: failed qp#%d: opcode = 0x%02x\n", __func__,
+			qp->elem.index, pkt->opcode);
 	return -EINVAL;
 }
 
@@ -166,6 +168,37 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 	return -EINVAL;
 }
 
+static int check_xrcd(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+		      struct rxe_qp *qp)
+{
+	struct rxe_xrcd *xrcd = qp->xrcd;
+	u32 srqn = xrceth_srqn(pkt);
+	struct rxe_srq *srq;
+	int err;
+
+	srq = rxe_pool_get_index(&rxe->srq_pool, srqn);
+	if (unlikely(!srq)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if (unlikely(srq->xrcd != xrcd)) {
+		rxe_put(srq);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	pkt->srq = srq;
+
+	return 0;
+
+err_out:
+	pr_debug("%s: qp#%d: failed err = %d\n", __func__,
+			qp->elem.index, err);
+	return err;
+}
+
 static int hdr_check(struct rxe_pkt_info *pkt)
 {
 	struct rxe_dev *rxe = pkt->rxe;
@@ -205,6 +238,12 @@ static int hdr_check(struct rxe_pkt_info *pkt)
 		err = check_keys(rxe, pkt, qpn, qp);
 		if (unlikely(err))
 			goto err2;
+
+		if (qp_type(qp) == IB_QPT_XRC_TGT) {
+			err = check_xrcd(rxe, pkt, qp);
+			if (unlikely(err))
+				goto err2;
+		}
 	} else {
 		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
 			pr_warn_ratelimited("no grh for mcast qpn\n");
-- 
2.34.1
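
[Note: check_xrcd() relies on the xrceth_srqn() accessor (added with the
XRC headers earlier in the series) to pull the target srq number out of the
XRCETH. Below is a stand-alone toy model of that accessor,
xrceth_srqn_model(), assuming the IBA layout: one big-endian 32-bit word
with the srq number in the low 24 bits and the upper 8 bits reserved.]

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static inline uint32_t xrceth_srqn_model(const void *xrceth)
{
	uint32_t v;

	/* network byte order on the wire; upper 8 bits are reserved */
	memcpy(&v, xrceth, sizeof(v));
	return ntohl(v) & 0xffffff;
}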


* [PATCH for-next 02/13] RDMA/rxe: Move next_opcode() to rxe_opcode.c
  2022-09-17  3:10 [PATCH for-next 00/13] Implement the xrc transport Bob Pearson
@ 2022-09-17  3:10 ` Bob Pearson
  0 siblings, 0 replies; 10+ messages in thread
From: Bob Pearson @ 2022-09-17  3:10 UTC (permalink / raw)
  To: jgg, zyjzyj2000, lizhijian, linux-rdma; +Cc: Bob Pearson

Move next_opcode() from rxe_req.c to rxe_opcode.c.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_loc.h    |   3 +
 drivers/infiniband/sw/rxe/rxe_opcode.c | 156 ++++++++++++++++++++++++-
 drivers/infiniband/sw/rxe/rxe_req.c    | 156 -------------------------
 3 files changed, 157 insertions(+), 158 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 22f6cc31d1d6..5526d83697c7 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -99,6 +99,9 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 		    struct sk_buff *skb);
 const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
 
+/* opcode.c */
+int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode);
+
 /* rxe_qp.c */
 int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index 0ea587c15931..6b1a1f197c4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -5,8 +5,8 @@
  */
 
 #include <rdma/ib_pack.h>
-#include "rxe_opcode.h"
-#include "rxe_hdr.h"
+
+#include "rxe.h"
 
 /* useful information about work request opcodes and pkt opcodes in
  * table form
@@ -919,3 +919,155 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 	},
 
 };
+
+static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
+{
+	switch (opcode) {
+	case IB_WR_RDMA_WRITE:
+		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_RC_RDMA_WRITE_LAST :
+				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_RC_RDMA_WRITE_ONLY :
+				IB_OPCODE_RC_RDMA_WRITE_FIRST;
+
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_RC_RDMA_WRITE_FIRST;
+
+	case IB_WR_SEND:
+		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_RC_SEND_LAST :
+				IB_OPCODE_RC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_RC_SEND_ONLY :
+				IB_OPCODE_RC_SEND_FIRST;
+
+	case IB_WR_SEND_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_RC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_RC_SEND_FIRST;
+
+	case IB_WR_RDMA_READ:
+		return IB_OPCODE_RC_RDMA_READ_REQUEST;
+
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+		return IB_OPCODE_RC_COMPARE_SWAP;
+
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		return IB_OPCODE_RC_FETCH_ADD;
+
+	case IB_WR_SEND_WITH_INV:
+		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
+				IB_OPCODE_RC_SEND_MIDDLE;
+		else
+			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
+				IB_OPCODE_RC_SEND_FIRST;
+	case IB_WR_REG_MR:
+	case IB_WR_LOCAL_INV:
+		return opcode;
+	}
+
+	return -EINVAL;
+}
+
+static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
+{
+	switch (opcode) {
+	case IB_WR_RDMA_WRITE:
+		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_UC_RDMA_WRITE_LAST :
+				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_UC_RDMA_WRITE_ONLY :
+				IB_OPCODE_UC_RDMA_WRITE_FIRST;
+
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
+		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
+			return fits ?
+				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_UC_RDMA_WRITE_FIRST;
+
+	case IB_WR_SEND:
+		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_UC_SEND_LAST :
+				IB_OPCODE_UC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_UC_SEND_ONLY :
+				IB_OPCODE_UC_SEND_FIRST;
+
+	case IB_WR_SEND_WITH_IMM:
+		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
+		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
+			return fits ?
+				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
+				IB_OPCODE_UC_SEND_MIDDLE;
+		else
+			return fits ?
+				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
+				IB_OPCODE_UC_SEND_FIRST;
+	}
+
+	return -EINVAL;
+}
+
+int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode)
+{
+	int fits = (wqe->dma.resid <= qp->mtu);
+
+	switch (qp_type(qp)) {
+	case IB_QPT_RC:
+		return next_opcode_rc(qp, opcode, fits);
+
+	case IB_QPT_UC:
+		return next_opcode_uc(qp, opcode, fits);
+
+	case IB_QPT_UD:
+	case IB_QPT_GSI:
+		switch (opcode) {
+		case IB_WR_SEND:
+			return IB_OPCODE_UD_SEND_ONLY;
+
+		case IB_WR_SEND_WITH_IMM:
+			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index e136abc802af..d2a9abfed596 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -11,9 +11,6 @@
 #include "rxe_loc.h"
 #include "rxe_queue.h"
 
-static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-		       u32 opcode);
-
 static inline void retry_first_write_send(struct rxe_qp *qp,
 					  struct rxe_send_wqe *wqe, int npsn)
 {
@@ -194,159 +191,6 @@ static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
 }
 
-static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
-{
-	switch (opcode) {
-	case IB_WR_RDMA_WRITE:
-		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
-		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
-			return fits ?
-				IB_OPCODE_RC_RDMA_WRITE_LAST :
-				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_RC_RDMA_WRITE_ONLY :
-				IB_OPCODE_RC_RDMA_WRITE_FIRST;
-
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
-		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
-			return fits ?
-				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
-				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
-				IB_OPCODE_RC_RDMA_WRITE_FIRST;
-
-	case IB_WR_SEND:
-		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
-		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
-			return fits ?
-				IB_OPCODE_RC_SEND_LAST :
-				IB_OPCODE_RC_SEND_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_RC_SEND_ONLY :
-				IB_OPCODE_RC_SEND_FIRST;
-
-	case IB_WR_SEND_WITH_IMM:
-		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
-		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
-			return fits ?
-				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
-				IB_OPCODE_RC_SEND_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
-				IB_OPCODE_RC_SEND_FIRST;
-
-	case IB_WR_RDMA_READ:
-		return IB_OPCODE_RC_RDMA_READ_REQUEST;
-
-	case IB_WR_ATOMIC_CMP_AND_SWP:
-		return IB_OPCODE_RC_COMPARE_SWAP;
-
-	case IB_WR_ATOMIC_FETCH_AND_ADD:
-		return IB_OPCODE_RC_FETCH_ADD;
-
-	case IB_WR_SEND_WITH_INV:
-		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
-		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
-			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
-				IB_OPCODE_RC_SEND_MIDDLE;
-		else
-			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
-				IB_OPCODE_RC_SEND_FIRST;
-	case IB_WR_REG_MR:
-	case IB_WR_LOCAL_INV:
-		return opcode;
-	}
-
-	return -EINVAL;
-}
-
-static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
-{
-	switch (opcode) {
-	case IB_WR_RDMA_WRITE:
-		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
-		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
-			return fits ?
-				IB_OPCODE_UC_RDMA_WRITE_LAST :
-				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_UC_RDMA_WRITE_ONLY :
-				IB_OPCODE_UC_RDMA_WRITE_FIRST;
-
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
-		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
-			return fits ?
-				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
-				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
-				IB_OPCODE_UC_RDMA_WRITE_FIRST;
-
-	case IB_WR_SEND:
-		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
-		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
-			return fits ?
-				IB_OPCODE_UC_SEND_LAST :
-				IB_OPCODE_UC_SEND_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_UC_SEND_ONLY :
-				IB_OPCODE_UC_SEND_FIRST;
-
-	case IB_WR_SEND_WITH_IMM:
-		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
-		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
-			return fits ?
-				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
-				IB_OPCODE_UC_SEND_MIDDLE;
-		else
-			return fits ?
-				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
-				IB_OPCODE_UC_SEND_FIRST;
-	}
-
-	return -EINVAL;
-}
-
-static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-		       u32 opcode)
-{
-	int fits = (wqe->dma.resid <= qp->mtu);
-
-	switch (qp_type(qp)) {
-	case IB_QPT_RC:
-		return next_opcode_rc(qp, opcode, fits);
-
-	case IB_QPT_UC:
-		return next_opcode_uc(qp, opcode, fits);
-
-	case IB_QPT_UD:
-	case IB_QPT_GSI:
-		switch (opcode) {
-		case IB_WR_SEND:
-			return IB_OPCODE_UD_SEND_ONLY;
-
-		case IB_WR_SEND_WITH_IMM:
-			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
-		}
-		break;
-
-	default:
-		break;
-	}
-
-	return -EINVAL;
-}
-
 static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 {
 	int depth;
-- 
2.34.1

