From: Bob Pearson <rpearsonhpe@gmail.com>
To: jgg@nvidia.com, zyjzyj2000@gmail.com, linux-rdma@vger.kernel.org
Cc: Bob Pearson <rpearsonhpe@gmail.com>
Subject: [PATCH for-next 09/13] RDMA/rxe: Extend SRQs to support extensions
Date: Thu, 29 Jul 2021 17:49:12 -0500
Message-ID: <20210729224915.38986-10-rpearsonhpe@gmail.com>
In-Reply-To: <20210729224915.38986-1-rpearsonhpe@gmail.com>

Extend rxe_create_srq() to support both basic and XRC SRQs. Add
rxe_srq_chk_init_attr() to validate the SRQ type and init attributes.
Drop the srq->srq_num field in favor of the srq_num carried in the
ib_srq struct maintained by rdma-core, and add inline extractors for
the SRQ's CQ, XRCD, type and number.
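
For reference, a minimal userspace sketch of the XRC SRQ create path this
enables (libibverbs; assumes ctx, pd, cq and xrcd were set up earlier, and
the attribute values are illustrative only):

	#include <infiniband/verbs.h>

	struct ibv_srq_init_attr_ex attr_ex = {
		.attr      = { .max_wr = 64, .max_sge = 1 },
		.comp_mask = IBV_SRQ_INIT_ATTR_TYPE | IBV_SRQ_INIT_ATTR_PD |
			     IBV_SRQ_INIT_ATTR_XRCD | IBV_SRQ_INIT_ATTR_CQ,
		.srq_type  = IBV_SRQT_XRC,
		.pd        = pd,
		.xrcd      = xrcd,
		.cq        = cq,
	};
	struct ibv_srq *srq = ibv_create_srq_ex(ctx, &attr_ex);
	uint32_t srq_num;

	/* retrieve the SRQ number assigned by the driver */
	if (!srq || ibv_get_srq_num(srq, &srq_num))
		/* handle error */;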

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_loc.h   |  5 +-
 drivers/infiniband/sw/rxe/rxe_srq.c   | 71 ++++++++++++++-------------
 drivers/infiniband/sw/rxe/rxe_verbs.c | 10 ++--
 drivers/infiniband/sw/rxe/rxe_verbs.h | 22 ++++++++-
 4 files changed, 66 insertions(+), 42 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index b4d45c592bd7..eac56e0c64ba 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -161,15 +161,12 @@ void retransmit_timer(struct timer_list *t);
 void rnr_nak_timer(struct timer_list *t);
 
 /* rxe_srq.c */
-#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)
-
+int rxe_srq_chk_init_attr(struct rxe_dev *rxe, struct ib_srq_init_attr *init);
 int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
-
 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_init_attr *init, struct ib_udata *udata,
 		      struct rxe_create_srq_resp __user *uresp);
-
 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
 		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index a9e7817e2732..edbfda0cc242 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -9,6 +9,32 @@
 #include "rxe_loc.h"
 #include "rxe_queue.h"
 
+int rxe_srq_chk_init_attr(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
+{
+	switch (init->srq_type) {
+	case IB_SRQT_BASIC:
+	case IB_SRQT_XRC:
+		break;
+	case IB_SRQT_TM:
+		pr_warn("Tag matching SRQ not supported\n");
+		return -EOPNOTSUPP;
+	default:
+		pr_warn("Unexpected SRQ type (%d)\n", init->srq_type);
+		return -EINVAL;
+	}
+
+	if (init->attr.max_sge > rxe->attr.max_srq_sge) {
+		pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
+			init->attr.max_sge, rxe->attr.max_srq_sge);
+		return -EINVAL;
+	}
+
+	if (init->attr.max_sge < RXE_MIN_SRQ_SGE)
+		init->attr.max_sge = RXE_MIN_SRQ_SGE;
+
+	return rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_MAX_WR);
+}
+
 int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
 {
@@ -48,23 +74,12 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 
 		if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
 			pr_warn("srq_limit (%d) > cur limit(%d)\n",
-				attr->srq_limit,
-				 srq->rq.queue->buf->index_mask);
+					attr->srq_limit,
+					srq->rq.queue->buf->index_mask);
 			goto err1;
 		}
 	}
 
-	if (mask == IB_SRQ_INIT_MASK) {
-		if (attr->max_sge > rxe->attr.max_srq_sge) {
-			pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
-				attr->max_sge, rxe->attr.max_srq_sge);
-			goto err1;
-		}
-
-		if (attr->max_sge < RXE_MIN_SRQ_SGE)
-			attr->max_sge = RXE_MIN_SRQ_SGE;
-	}
-
 	return 0;
 
 err1:
@@ -78,24 +93,22 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	int err;
 	int srq_wqe_size;
 	struct rxe_queue *q;
-	enum queue_type type;
 
-	srq->ibsrq.event_handler	= init->event_handler;
-	srq->ibsrq.srq_context		= init->srq_context;
-	srq->limit		= init->attr.srq_limit;
-	srq->srq_num		= srq->pelem.index;
-	srq->rq.max_wr		= init->attr.max_wr;
-	srq->rq.max_sge		= init->attr.max_sge;
-	srq->rq.is_user		= srq->is_user;
+	srq->limit = init->attr.srq_limit;
+	srq->rq.max_wr = init->attr.max_wr;
+	srq->rq.max_sge = init->attr.max_sge;
+	srq->rq.is_user = srq->is_user;
 
-	srq_wqe_size		= rcv_wqe_size(srq->rq.max_sge);
+	if (init->srq_type == IB_SRQT_XRC)
+		srq->ibsrq.ext.xrc.srq_num = srq->pelem.index;
+
+	srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
 
 	spin_lock_init(&srq->rq.producer_lock);
 	spin_lock_init(&srq->rq.consumer_lock);
 
-	type = QUEUE_TYPE_FROM_CLIENT;
-	q = rxe_queue_init(rxe, &srq->rq.max_wr,
-			srq_wqe_size, type);
+	q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size,
+			   QUEUE_TYPE_FROM_CLIENT);
 	if (!q) {
 		pr_warn("unable to allocate queue for srq\n");
 		return -ENOMEM;
@@ -111,14 +124,6 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 		return err;
 	}
 
-	if (uresp) {
-		if (copy_to_user(&uresp->srq_num, &srq->srq_num,
-				 sizeof(uresp->srq_num))) {
-			rxe_queue_cleanup(q);
-			return -EFAULT;
-		}
-	}
-
 	return 0;
 }
 
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index b4b993f1ce92..fbd1e2d70682 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -307,9 +307,6 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 	struct rxe_srq *srq = to_rsrq(ibsrq);
 	struct rxe_create_srq_resp __user *uresp = NULL;
 
-	if (init->srq_type != IB_SRQT_BASIC)
-		return -EOPNOTSUPP;
-
 	if (udata) {
 		if (udata->outlen < sizeof(*uresp))
 			return -EINVAL;
@@ -319,7 +316,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 		srq->is_user = false;
 	}
 
-	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
+	err = rxe_srq_chk_init_attr(rxe, init);
 	if (err)
 		goto err_out;
 
@@ -327,6 +324,8 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 	if (err)
 		goto err_out;
 
+	rxe_add_index(srq);
+
 	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
 	if (err)
 		goto err_drop_srq_ref;
@@ -334,6 +333,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 	return 0;
 
 err_drop_srq_ref:
+	rxe_drop_index(srq);
 	rxe_drop_ref(srq);
 err_out:
 	return err;
@@ -391,7 +391,9 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 	if (srq->rq.queue)
 		rxe_queue_cleanup(srq->rq.queue);
 
+	rxe_drop_index(srq);
 	rxe_drop_ref(srq);
+
 	return 0;
 }
 
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 5b75de74a992..52599f398ddd 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -104,7 +104,6 @@ struct rxe_srq {
 	struct ib_srq		ibsrq;
 	struct rxe_pool_entry	pelem;
 	struct rxe_rq		rq;
-	u32			srq_num;
 	bool			is_user;
 
 	int			limit;
@@ -542,11 +541,32 @@ static inline enum ib_qp_type rxe_qp_type(struct rxe_qp *qp)
 	return qp->ibqp.qp_type;
 }
 
+/* SRQ extractors */
+static inline struct rxe_cq *rxe_srq_cq(struct rxe_srq *srq)
+{
+	return to_rcq(srq->ibsrq.ext.cq);
+}
+
+static inline int rxe_srq_num(struct rxe_srq *srq)
+{
+	return srq->ibsrq.ext.xrc.srq_num;
+}
+
 static inline struct rxe_pd *rxe_srq_pd(struct rxe_srq *srq)
 {
 	return to_rpd(srq->ibsrq.pd);
 }
 
+static inline enum ib_srq_type rxe_srq_type(struct rxe_srq *srq)
+{
+	return srq->ibsrq.srq_type;
+}
+
+static inline struct rxe_xrcd *rxe_srq_xrcd(struct rxe_srq *srq)
+{
+	return to_rxrcd(srq->ibsrq.ext.xrc.xrcd);
+}
+
 int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
 
 void rxe_mc_cleanup(struct rxe_pool_entry *arg);
-- 
2.30.2



Thread overview: 15+ messages
2021-07-29 22:49 [PATCH for-next 00/13] RDMA:rxe: Implement XRC for rxe Bob Pearson
2021-07-29 22:49 ` [PATCH for-next 01/13] RDMA/rxe: Decouple rxe_pkt_info from sk_buff Bob Pearson
2021-08-27 13:01   ` Jason Gunthorpe
2021-07-29 22:49 ` [PATCH for-next 02/13] IB/core: Add xrc opcodes to ib_pack.h Bob Pearson
2021-07-29 22:49 ` [PATCH for-next 03/13] RDMA/rxe: Extend rxe_send_wr to support xrceth Bob Pearson
2021-07-29 22:49 ` [PATCH for-next 04/13] RDMA/rxe: Extend rxe_opcode.h to support xrc Bob Pearson
2021-07-29 22:49 ` [PATCH for-next 05/13] RDMA/rxe: Add XRC ETH to rxe_hdr.h Bob Pearson
2021-07-29 22:49 ` [PATCH for-next 06/13] RDMA/rxe: Add XRC QP type to rxe_wr_opcode_info Bob Pearson
2021-07-29 22:49 ` [PATCH for-next 07/13] RDMA/rxe: Add XRC opcodes to rxe_opcode_info Bob Pearson
2021-07-29 22:49 ` [PATCH for-next 08/13] RDMA/rxe: Support alloc/dealloc xrcd Bob Pearson
2021-07-29 22:49 ` [PATCH for-next 09/13] RDMA/rxe: Extend SRQs to support extensions Bob Pearson [this message]
2021-07-29 22:49 ` [PATCH for-next 10/13] RDMA/rxe: Compute next opcode for XRC Bob Pearson
2021-07-29 22:49 ` [PATCH for-next 11/13] RDMA/rxe: Extend rxe_verbs and rxe_qp to support XRC Bob Pearson
2021-07-29 22:49 ` [PATCH for-next 12/13] RDMA/rxe: Extend rxe send XRC packets Bob Pearson
2021-07-29 22:49 ` [PATCH for-next 13/13] RDMA/rxe: Enable receiving " Bob Pearson
