From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@mellanox.com>
Cc: Leon Romanovsky <leonro@mellanox.com>,
	linux-rdma@vger.kernel.org, Maor Gottlieb <maorg@mellanox.com>
Subject: [PATCH rdma-next 15/18] RDMA/mlx5: Use flags_en mechanism to mark QP created with WQE signature
Date: Mon, 20 Apr 2020 18:11:02 +0300	[thread overview]
Message-ID: <20200420151105.282848-16-leon@kernel.org> (raw)
In-Reply-To: <20200420151105.282848-1-leon@kernel.org>

From: Leon Romanovsky <leonro@mellanox.com>

MLX5_QP_FLAG_SIGNATURE is exposed to users, but in the kernel the
create_qp flow treated it differently from the other MLX5_QP_FLAG_*
flags. Fix this by dropping the wq_sig boolean variable and using the
general flags_en mechanism instead.

Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
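Note (illustration only, not part of the patch): below is a minimal,
stand-alone sketch of the flags_en pattern this patch switches to. The
flag values, struct layouts and main() harness are simplified stand-ins
for the real mlx5 definitions, kept here only to show the idea of
carrying the WQE-signature request as one more MLX5_QP_FLAG_* bit
instead of a dedicated wq_sig field.

#include <stdio.h>

/* Simplified stand-ins for illustration; the real definitions live in
 * include/uapi/rdma/mlx5-abi.h and drivers/infiniband/hw/mlx5/mlx5_ib.h.
 */
#define MLX5_QP_FLAG_SIGNATURE		(1 << 0)
#define MLX5_QP_FLAG_SCATTER_CQE	(1 << 1)

struct mlx5_ib_qp {
	unsigned int flags_en;	/* replaces the old "int wq_sig" member */
};

struct mlx5_ib_create_qp {
	unsigned int flags;	/* MLX5_QP_FLAG_* bits requested by userspace */
};

int main(void)
{
	struct mlx5_ib_create_qp ucmd = { .flags = MLX5_QP_FLAG_SIGNATURE };
	struct mlx5_ib_qp qp = { 0 };

	/* Instead of qp->wq_sig = !!(ucmd->flags & ...), the request is
	 * recorded as a bit in flags_en next to the other vendor QP flags.
	 */
	if (ucmd.flags & MLX5_QP_FLAG_SIGNATURE)
		qp.flags_en |= MLX5_QP_FLAG_SIGNATURE;

	/* Later users simply test the bit. */
	if (qp.flags_en & MLX5_QP_FLAG_SIGNATURE)
		printf("QP created with WQE signature\n");

	return 0;
}

With this pattern every later check reads qp->flags_en, so the signature
flag is handled the same way as the rest of the MLX5_QP_FLAG_* bits.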
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  1 -
 drivers/infiniband/hw/mlx5/odp.c     |  2 +-
 drivers/infiniband/hw/mlx5/qp.c      | 36 +++++++++++++++++-----------
 3 files changed, 23 insertions(+), 16 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 4492630e7638..251380ff5706 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -446,7 +446,6 @@ struct mlx5_ib_qp {
 	u32			flags;
 	u8			port;
 	u8			state;
-	int			wq_sig;
 	int			scat_cqe;
 	int			max_inline_data;
 	struct mlx5_bf	        bf;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 16af1105cfcf..e4759310c0e2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1190,7 +1190,7 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
 	struct mlx5_ib_wq *wq = &qp->rq;
 	int wqe_size = 1 << wq->wqe_shift;
 
-	if (qp->wq_sig) {
+	if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
 		mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
 		return -EFAULT;
 	}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 82152e86c9d6..a3c693ce1865 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -41,9 +41,6 @@
 #include "cmd.h"
 #include "qp.h"
 
-/* not supported currently */
-static int wq_signature;
-
 enum {
 	MLX5_IB_ACK_REQ_FREQ	= 8,
 };
@@ -392,17 +389,26 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 		cap->max_recv_wr = 0;
 		cap->max_recv_sge = 0;
 	} else {
+		int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE);
+
 		if (ucmd) {
 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
 			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
 				return -EINVAL;
 			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
-			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+			if ((1 << qp->rq.wqe_shift) /
+				    sizeof(struct mlx5_wqe_data_seg) <
+			    wq_sig)
 				return -EINVAL;
-			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+			qp->rq.max_gs =
+				(1 << qp->rq.wqe_shift) /
+					sizeof(struct mlx5_wqe_data_seg) -
+				wq_sig;
 			qp->rq.max_post = qp->rq.wqe_cnt;
 		} else {
-			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
+			wqe_size =
+				wq_sig ? sizeof(struct mlx5_wqe_signature_seg) :
+					 0;
 			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
 			wqe_size = roundup_pow_of_two(wqe_size);
 			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
@@ -416,7 +422,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 				return -EINVAL;
 			}
 			qp->rq.wqe_shift = ilog2(wqe_size);
-			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+			qp->rq.max_gs =
+				(1 << qp->rq.wqe_shift) /
+					sizeof(struct mlx5_wqe_data_seg) -
+				wq_sig;
 			qp->rq.max_post = qp->rq.wqe_cnt;
 		}
 	}
@@ -2008,7 +2017,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			return err;
 
-		qp->wq_sig = !!(ucmd->flags & MLX5_QP_FLAG_SIGNATURE);
+		if (ucmd->flags & MLX5_QP_FLAG_SIGNATURE)
+			qp->flags_en |= MLX5_QP_FLAG_SIGNATURE;
 		if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
 			qp->scat_cqe =
 				!!(ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE);
@@ -2045,8 +2055,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			}
 			qp->flags_en |= MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE;
 		}
-	} else {
-		qp->wq_sig = !!wq_signature;
 	}
 
 	if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
@@ -2115,7 +2123,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		MLX5_SET(qpc, qpc, latency_sensitive, 1);
 
 
-	if (qp->wq_sig)
+	if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
 		MLX5_SET(qpc, qpc, wq_signature, 1);
 
 	if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
@@ -5050,7 +5058,7 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
 					     mlx5_opcode | ((u32)opmod << 24));
 	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
 	ctrl->fm_ce_se |= fence;
-	if (unlikely(qp->wq_sig))
+	if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE))
 		ctrl->signature = wq_sig(ctrl);
 
 	qp->sq.wrid[idx] = wr_id;
@@ -5502,7 +5510,7 @@ static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 		}
 
 		scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
-		if (qp->wq_sig)
+		if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
 			scat++;
 
 		for (i = 0; i < wr->num_sge; i++)
@@ -5514,7 +5522,7 @@ static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 			scat[i].addr       = 0;
 		}
 
-		if (qp->wq_sig) {
+		if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
 			sig = (struct mlx5_rwqe_sig *)scat;
 			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
 		}
-- 
2.25.2


