From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@mellanox.com>
Cc: Leon Romanovsky <leonro@mellanox.com>,
	linux-rdma@vger.kernel.org, Maor Gottlieb <maorg@mellanox.com>
Subject: [PATCH rdma-next 18/18] RDMA/mlx5: Process all vendor flags in one place
Date: Mon, 20 Apr 2020 18:11:05 +0300
Message-ID: <20200420151105.282848-19-leon@kernel.org>
In-Reply-To: <20200420151105.282848-1-leon@kernel.org>

From: Leon Romanovsky <leonro@mellanox.com>

Process all vendor flags provided through ucmd in one place and check that
they are valid: each flag is accepted only when its QP type and device
capability conditions hold, and any flags left unclaimed cause QP creation
to fail with -EINVAL.
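
To illustrate the pattern (a minimal standalone sketch, not the mlx5 code
itself; the FLAG_* names, values, and process_all_flags() helper below are
invented for the example), each known flag is tested against a support
condition, accepted flags are moved from the requested mask into
qp->flags_en, and any bits still set at the end are rejected:

#include <stdio.h>
#include <stdbool.h>

#define FLAG_SIGNATURE   (1u << 0)
#define FLAG_SCATTER_CQE (1u << 1)
#define FLAG_TUNNEL      (1u << 2)

struct qp_state {
	unsigned int flags_en;	/* flags accepted for this QP */
};

/* Accept one flag if requested and supported; consume it from *flags. */
static void process_flag(unsigned int *flags, unsigned int flag,
			 bool supported, struct qp_state *qp)
{
	if (!(*flags & flag))
		return;			/* flag was not requested */

	if (supported) {
		qp->flags_en |= flag;	/* record it on the QP */
		*flags &= ~flag;	/* and consume it */
	}
	/* Requested-but-unsupported flags stay set and fail below. */
}

static int process_all_flags(unsigned int requested, bool cap_scatter_cqe,
			     struct qp_state *qp)
{
	unsigned int flags = requested;

	process_flag(&flags, FLAG_SIGNATURE, true, qp);
	process_flag(&flags, FLAG_SCATTER_CQE, cap_scatter_cqe, qp);
	process_flag(&flags, FLAG_TUNNEL, false, qp);

	if (flags) {
		fprintf(stderr, "unsupported flags 0x%x\n", flags);
		return -1;	/* the kernel code returns -EINVAL */
	}
	return 0;
}

int main(void)
{
	struct qp_state qp = { 0 };

	if (!process_all_flags(FLAG_SIGNATURE | FLAG_SCATTER_CQE,
			       true, &qp))
		printf("accepted flags: 0x%x\n", qp.flags_en);
	return 0;
}

Unlike this sketch, the real process_vendor_flag() in the patch below
additionally drops MLX5_QP_FLAG_SCATTER_CQE silently when the capability
is missing, rather than rejecting it.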

Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
 drivers/infiniband/hw/mlx5/qp.c | 156 +++++++++++++++-----------------
 1 file changed, 71 insertions(+), 85 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 15c476e858c5..eb9e1944263c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1430,13 +1430,6 @@ static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
 }
 
-static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
-{
-	return  (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
-		 MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
-		 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
-}
-
 static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 				      struct mlx5_ib_rq *rq,
 				      u32 qp_flags_en,
@@ -1693,27 +1686,20 @@ static int create_rss_raw_qp_tir(struct ib_pd *pd, struct mlx5_ib_qp *qp,
 		return -EOPNOTSUPP;
 	}
 
-	if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
-	    !tunnel_offload_supported(dev->mdev)) {
-		mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
-		return -EOPNOTSUPP;
-	}
-
 	if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
 	    !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
 		mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
 		return -EOPNOTSUPP;
 	}
 
-	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
-		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+	if (dev->is_rep)
 		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
-	}
 
-	if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
+	if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+	if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
-		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
-	}
 
 	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
 	if (err) {
@@ -1959,11 +1945,6 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
 	return atomic_mode;
 }
 
-static inline bool check_flags_mask(uint64_t input, uint64_t supported)
-{
-	return (input & ~supported) == 0;
-}
-
 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct mlx5_ib_create_qp *ucmd,
@@ -1999,63 +1980,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
 
 	if (udata) {
-		if (!check_flags_mask(ucmd->flags,
-				      MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
-				      MLX5_QP_FLAG_BFREG_INDEX |
-				      MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
-				      MLX5_QP_FLAG_SCATTER_CQE |
-				      MLX5_QP_FLAG_SIGNATURE |
-				      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
-				      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
-				      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
-				      MLX5_QP_FLAG_UAR_PAGE_INDEX |
-				      MLX5_QP_FLAG_TYPE_DCI |
-				      MLX5_QP_FLAG_TYPE_DCT))
-			return -EINVAL;
-
 		err = get_qp_user_index(ucontext, ucmd, udata->inlen, &uidx);
 		if (err)
 			return err;
-
-		if (ucmd->flags & MLX5_QP_FLAG_SIGNATURE)
-			qp->flags_en |= MLX5_QP_FLAG_SIGNATURE;
-		if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE &&
-		    MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
-			qp->flags_en |= MLX5_QP_FLAG_SCATTER_CQE;
-
-		if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
-			if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
-			    !tunnel_offload_supported(mdev)) {
-				mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
-				return -EOPNOTSUPP;
-			}
-			qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
-		}
-
-		if (ucmd->flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
-			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
-				mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
-				return -EOPNOTSUPP;
-			}
-			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
-		}
-
-		if (ucmd->flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
-			if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
-				mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
-				return -EOPNOTSUPP;
-			}
-			qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
-		}
-
-		if (ucmd->flags & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) {
-			if (init_attr->qp_type != IB_QPT_RC ||
-				!MLX5_CAP_GEN(dev->mdev, qp_packet_based)) {
-				mlx5_ib_dbg(dev, "packet based credit mode isn't supported\n");
-				return -EOPNOTSUPP;
-			}
-			qp->flags_en |= MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE;
-		}
 	}
 
 	if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
@@ -2474,7 +2401,7 @@ static int create_dct(struct ib_pd *pd, struct mlx5_ib_qp *qp,
 	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
 	MLX5_SET(dctc, dctc, user_index, uidx);
 
-	if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE) {
+	if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) {
 		int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq);
 
 		if (rcqe_sz == 128)
@@ -2577,22 +2504,81 @@ static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	return 0;
 }
 
-static int process_vendor_flags(struct mlx5_ib_qp *qp,
+static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+				bool cond, struct mlx5_ib_qp *qp)
+{
+	if (!(*flags & flag))
+		return;
+
+	if (cond) {
+		qp->flags_en |= flag;
+		*flags &= ~flag;
+		return;
+	}
+
+	if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+		/*
+		 * We don't return error if this flag was provided,
+		 * and mlx5 doesn't have right capability.
+		 */
+		*flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+		return;
+	}
+	mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
+}
+
+static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 				struct ib_qp_init_attr *attr,
 				struct mlx5_ib_create_qp *ucmd)
 {
-	switch (ucmd->flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
+	struct mlx5_core_dev *mdev = dev->mdev;
+	int flags = ucmd->flags;
+	bool cond;
+
+	switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
 	case MLX5_QP_FLAG_TYPE_DCI:
 		qp->qp_sub_type = MLX5_IB_QPT_DCI;
 		break;
 	case MLX5_QP_FLAG_TYPE_DCT:
 		qp->qp_sub_type = MLX5_IB_QPT_DCT;
-		break;
+		fallthrough;
 	default:
+		break;
+	}
+
+	if (attr->qp_type == IB_QPT_DRIVER && !qp->qp_sub_type)
 		return -EINVAL;
+
+	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
+	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
+
+	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
+	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
+			    MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+
+	if (attr->qp_type == IB_QPT_RAW_PACKET) {
+		cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
+		       MLX5_CAP_ETH(mdev, tunnel_stateless_gre) ||
+		       MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx);
+		process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS,
+				    cond, qp);
+		process_vendor_flag(dev, &flags,
+				    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true,
+				    qp);
+		process_vendor_flag(dev, &flags,
+				    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true,
+				    qp);
 	}
 
-	return 0;
+	if (attr->qp_type == IB_QPT_RC)
+		process_vendor_flag(dev, &flags,
+				    MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE,
+				    MLX5_CAP_GEN(mdev, qp_packet_based), qp);
+
+	if (flags)
+		mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags);
+
+	return (flags) ? -EINVAL : 0;
 }
 
 static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
@@ -2774,8 +2760,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 	if (!qp)
 		return ERR_PTR(-ENOMEM);
 
-	if (init_attr->qp_type == IB_QPT_DRIVER) {
-		err = process_vendor_flags(qp, init_attr, &ucmd);
+	if (udata) {
+		err = process_vendor_flags(dev, qp, init_attr, &ucmd);
 		if (err)
 			goto free_qp;
 	}
-- 
2.25.2



Thread overview: 24+ messages
2020-04-20 15:10 [PATCH rdma-next 00/18] Refactor mlx5_ib_create_qp (Part I) Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 01/18] RDMA/mlx5: Organize QP types checks in one place Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 02/18] RDMA/mlx5: Delete impossible GSI port check Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 03/18] RDMA/mlx5: Perform check if QP creation flow is valid Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 04/18] RDMA/mlx5: Prepare QP allocation for future removal Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 05/18] RDMA/mlx5: Avoid setting redundant NULL for XRC QPs Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 06/18] RDMA/mlx5: Set QP subtype immediately when it is known Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 07/18] RDMA/mlx5: Separate create QP flows to be based on type Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 08/18] RDMA/mlx5: Split scatter CQE configuration for DCT QP Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 09/18] RDMA/mlx5: Update all DRIVER QP places to use QP subtype Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 10/18] RDMA/mlx5: Move DRIVER QP flags check into separate function Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 11/18] RDMA/mlx5: Remove second copy from user for non RSS RAW QPs Leon Romanovsky
2020-04-20 15:10 ` [PATCH rdma-next 12/18] RDMA/mlx5: Initial separation of RAW_PACKET QP from common flow Leon Romanovsky
2020-04-20 15:11 ` [PATCH rdma-next 13/18] RDMA/mlx5: Delete create QP flags obfuscation Leon Romanovsky
2020-04-20 15:11 ` [PATCH rdma-next 14/18] RDMA/mlx5: Process create QP flags in one place Leon Romanovsky
2020-04-23 18:53   ` Leon Romanovsky
2020-04-24 19:51   ` Jason Gunthorpe
2020-04-24 20:23     ` Leon Romanovsky
2020-04-20 15:11 ` [PATCH rdma-next 15/18] RDMA/mlx5: Use flags_en mechanism to mark QP created with WQE signature Leon Romanovsky
2020-04-20 15:11 ` [PATCH rdma-next 16/18] RDMA/mlx5: Change scatter CQE flag to be set like other vendor flags Leon Romanovsky
2020-04-20 15:11 ` [PATCH rdma-next 17/18] RDMA/mlx5: Return all configured create flags through query QP Leon Romanovsky
2020-04-20 15:11 ` Leon Romanovsky [this message]
2020-04-24 19:54 ` [PATCH rdma-next 00/18] Refactor mlx5_ib_create_qp (Part I) Jason Gunthorpe
2020-04-24 20:26   ` Leon Romanovsky
