From: Bob Pearson <rpearsonhpe@gmail.com>
To: jgg@nvidia.com, zyjzyj2000@gmail.com, linux-rdma@vger.kernel.org
Cc: Bob Pearson <rpearsonhpe@gmail.com>
Subject: [PATCH for-next v14 01/10] RDMA/rxe: Remove IB_SRQ_INIT_MASK
Date: Wed, 20 Apr 2022 20:40:34 -0500
Message-ID: <20220421014042.26985-2-rpearsonhpe@gmail.com>
In-Reply-To: <20220421014042.26985-1-rpearsonhpe@gmail.com>

Currently the IB_SRQ_INIT_MASK define is used to distinguish the
rxe_create_srq verb from the rxe_modify_srq verb so that some code can
be shared between the two subroutines.

This commit splits rxe_srq_chk_attr into two subroutines,
rxe_srq_chk_init and rxe_srq_chk_attr, which handle the create_srq and
modify_srq verbs separately, and removes the now unused
IB_SRQ_INIT_MASK define.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_loc.h   |   9 +-
 drivers/infiniband/sw/rxe/rxe_srq.c   | 118 +++++++++++++++-----------
 drivers/infiniband/sw/rxe/rxe_verbs.c |   4 +-
 3 files changed, 74 insertions(+), 57 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 2ffbe3390668..ff6cae2c2949 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -159,15 +159,12 @@ void retransmit_timer(struct timer_list *t);
 void rnr_nak_timer(struct timer_list *t);
 
 /* rxe_srq.c */
-#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)
-
-int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
-		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
-
+int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init);
 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_init_attr *init, struct ib_udata *udata,
 		      struct rxe_create_srq_resp __user *uresp);
-
+int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
 		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 0c0721f04357..e2dcfc5d97e3 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -6,64 +6,34 @@
 
 #include <linux/vmalloc.h>
 #include "rxe.h"
-#include "rxe_loc.h"
 #include "rxe_queue.h"
 
-int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
-		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
+int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
 {
-	if (srq && srq->error) {
-		pr_warn("srq in error state\n");
+	struct ib_srq_attr *attr = &init->attr;
+
+	if (attr->max_wr > rxe->attr.max_srq_wr) {
+		pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
+			attr->max_wr, rxe->attr.max_srq_wr);
 		goto err1;
 	}
 
-	if (mask & IB_SRQ_MAX_WR) {
-		if (attr->max_wr > rxe->attr.max_srq_wr) {
-			pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
-				attr->max_wr, rxe->attr.max_srq_wr);
-			goto err1;
-		}
-
-		if (attr->max_wr <= 0) {
-			pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
-			goto err1;
-		}
-
-		if (srq && srq->limit && (attr->max_wr < srq->limit)) {
-			pr_warn("max_wr (%d) < srq->limit (%d)\n",
-				attr->max_wr, srq->limit);
-			goto err1;
-		}
-
-		if (attr->max_wr < RXE_MIN_SRQ_WR)
-			attr->max_wr = RXE_MIN_SRQ_WR;
+	if (attr->max_wr <= 0) {
+		pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
+		goto err1;
 	}
 
-	if (mask & IB_SRQ_LIMIT) {
-		if (attr->srq_limit > rxe->attr.max_srq_wr) {
-			pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
-				attr->srq_limit, rxe->attr.max_srq_wr);
-			goto err1;
-		}
+	if (attr->max_wr < RXE_MIN_SRQ_WR)
+		attr->max_wr = RXE_MIN_SRQ_WR;
 
-		if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
-			pr_warn("srq_limit (%d) > cur limit(%d)\n",
-				attr->srq_limit,
-				 srq->rq.queue->buf->index_mask);
-			goto err1;
-		}
+	if (attr->max_sge > rxe->attr.max_srq_sge) {
+		pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
+			attr->max_sge, rxe->attr.max_srq_sge);
+		goto err1;
 	}
 
-	if (mask == IB_SRQ_INIT_MASK) {
-		if (attr->max_sge > rxe->attr.max_srq_sge) {
-			pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
-				attr->max_sge, rxe->attr.max_srq_sge);
-			goto err1;
-		}
-
-		if (attr->max_sge < RXE_MIN_SRQ_SGE)
-			attr->max_sge = RXE_MIN_SRQ_SGE;
-	}
+	if (attr->max_sge < RXE_MIN_SRQ_SGE)
+		attr->max_sge = RXE_MIN_SRQ_SGE;
 
 	return 0;
 
@@ -93,8 +63,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	spin_lock_init(&srq->rq.consumer_lock);
 
 	type = QUEUE_TYPE_FROM_CLIENT;
-	q = rxe_queue_init(rxe, &srq->rq.max_wr,
-			srq_wqe_size, type);
+	q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type);
 	if (!q) {
 		pr_warn("unable to allocate queue for srq\n");
 		return -ENOMEM;
@@ -121,6 +90,57 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
 	return 0;
 }
 
+int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
+{
+	if (srq->error) {
+		pr_warn("srq in error state\n");
+		goto err1;
+	}
+
+	if (mask & IB_SRQ_MAX_WR) {
+		if (attr->max_wr > rxe->attr.max_srq_wr) {
+			pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
+				attr->max_wr, rxe->attr.max_srq_wr);
+			goto err1;
+		}
+
+		if (attr->max_wr <= 0) {
+			pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
+			goto err1;
+		}
+
+		if (srq->limit && (attr->max_wr < srq->limit)) {
+			pr_warn("max_wr (%d) < srq->limit (%d)\n",
+				attr->max_wr, srq->limit);
+			goto err1;
+		}
+
+		if (attr->max_wr < RXE_MIN_SRQ_WR)
+			attr->max_wr = RXE_MIN_SRQ_WR;
+	}
+
+	if (mask & IB_SRQ_LIMIT) {
+		if (attr->srq_limit > rxe->attr.max_srq_wr) {
+			pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
+				attr->srq_limit, rxe->attr.max_srq_wr);
+			goto err1;
+		}
+
+		if (attr->srq_limit > srq->rq.queue->buf->index_mask) {
+			pr_warn("srq_limit (%d) > cur limit(%d)\n",
+				attr->srq_limit,
+				srq->rq.queue->buf->index_mask);
+			goto err1;
+		}
+	}
+
+	return 0;
+
+err1:
+	return -EINVAL;
+}
+
 int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
 		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 58e4412b1d16..2ddfd99dd020 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -7,8 +7,8 @@
 #include <linux/dma-mapping.h>
 #include <net/addrconf.h>
 #include <rdma/uverbs_ioctl.h>
+
 #include "rxe.h"
-#include "rxe_loc.h"
 #include "rxe_queue.h"
 #include "rxe_hw_counters.h"
 
@@ -295,7 +295,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 		uresp = udata->outbuf;
 	}
 
-	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
+	err = rxe_srq_chk_init(rxe, init);
 	if (err)
 		goto err1;
 
-- 
2.32.0

