From: Maor Gottlieb <maorg@mellanox.com>
To: davem@davemloft.net, jgg@mellanox.com, dledford@redhat.com,
	j.vosburgh@gmail.com, vfalico@gmail.com, andy@greyhouse.net,
	kuba@kernel.org, jiri@mellanox.com, dsahern@kernel.org
Cc: leonro@mellanox.com, saeedm@mellanox.com,
	linux-rdma@vger.kernel.org, netdev@vger.kernel.org,
	alexr@mellanox.com, Maor Gottlieb <maorg@mellanox.com>
Subject: [PATCH V4 mlx5-next 14/15] RDMA/mlx5: Refactor affinity related code
Date: Wed, 22 Apr 2020 11:39:50 +0300	[thread overview]
Message-ID: <20200422083951.17424-15-maorg@mellanox.com> (raw)
In-Reply-To: <20200422083951.17424-1-maorg@mellanox.com>

Move the affinity related code in modify qp into a separate function.
This is a preparation for the next patch, which extends the affinity
calculation to consider the xmit slave.

Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
---
 drivers/infiniband/hw/mlx5/qp.c | 90 +++++++++++++++++++--------------
 1 file changed, 53 insertions(+), 37 deletions(-)
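
Note for reviewers (editor's illustration, not part of the patch): a
minimal standalone sketch of the round-robin selection that the new
get_tx_affinity_rr() keeps, i.e. an atomic counter mapped onto a port
number in 1..MLX5_MAX_PORTS. MLX5_MAX_PORTS is assumed to be 2 here,
and C11 stdatomic stands in for the kernel atomic_t API:

/*
 * Illustrative sketch only (not kernel code): the round-robin port
 * selection preserved by get_tx_affinity_rr(), using C11 atomics in
 * place of the kernel atomic API.  MLX5_MAX_PORTS assumed to be 2.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MLX5_MAX_PORTS 2

static atomic_uint tx_port_affinity;

static unsigned int next_tx_affinity(void)
{
	/* kernel form: atomic_add_return(1, ...) % MLX5_MAX_PORTS + 1 */
	return (atomic_fetch_add(&tx_port_affinity, 1) + 1) %
		MLX5_MAX_PORTS + 1;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("QP %d -> tx affinity port %u\n", i, next_tx_affinity());
	return 0;
}

With two ports this simply alternates the tx affinity across QPs; the
following patch in the series ("RDMA/mlx5: Set lag tx affinity
according to slave") overrides this fallback when the xmit slave is
known.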

diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 1456db4b6295..a45499809903 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3416,33 +3416,61 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	return 0;
 }
 
-static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
-				    struct mlx5_ib_pd *pd,
-				    struct mlx5_ib_qp_base *qp_base,
-				    u8 port_num, struct ib_udata *udata)
+static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
+				       struct ib_udata *udata)
 {
 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
 		udata, struct mlx5_ib_ucontext, ibucontext);
-	unsigned int tx_port_affinity;
+	u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+	atomic_t *tx_port_affinity;
 
-	if (ucontext) {
-		tx_port_affinity = (unsigned int)atomic_add_return(
-					   1, &ucontext->tx_port_affinity) %
-					   MLX5_MAX_PORTS +
-				   1;
+	if (ucontext)
+		tx_port_affinity = &ucontext->tx_port_affinity;
+	else
+		tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
+
+	return (unsigned int)atomic_add_return(1, tx_port_affinity) %
+		MLX5_MAX_PORTS + 1;
+}
+
+static bool qp_supports_affinity(struct ib_qp *qp)
+{
+	struct mlx5_ib_qp *mqp = to_mqp(qp);
+
+	if ((qp->qp_type == IB_QPT_RC) ||
+	    (qp->qp_type == IB_QPT_UD &&
+	     !(mqp->flags & MLX5_IB_QP_SQPN_QP1)) ||
+	    (qp->qp_type == IB_QPT_UC) ||
+	    (qp->qp_type == IB_QPT_RAW_PACKET) ||
+	    (qp->qp_type == IB_QPT_XRC_INI) ||
+	    (qp->qp_type == IB_QPT_XRC_TGT))
+		return true;
+	return false;
+}
+
+static unsigned int get_tx_affinity(struct ib_qp *qp, u8 init,
+				    struct ib_udata *udata)
+{
+	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+		udata, struct mlx5_ib_ucontext, ibucontext);
+	struct mlx5_ib_dev *dev = to_mdev(qp->device);
+	struct mlx5_ib_qp *mqp = to_mqp(qp);
+	struct mlx5_ib_qp_base *qp_base;
+	unsigned int tx_affinity;
+
+	if (!(dev->lag_active && init && qp_supports_affinity(qp)))
+		return 0;
+
+	tx_affinity = get_tx_affinity_rr(dev, udata);
+
+	qp_base = &mqp->trans_qp.base;
+	if (ucontext)
 		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
-				tx_port_affinity, qp_base->mqp.qpn, ucontext);
-	} else {
-		tx_port_affinity =
-			(unsigned int)atomic_add_return(
-				1, &dev->port[port_num].roce.tx_port_affinity) %
-				MLX5_MAX_PORTS +
-			1;
+			    tx_affinity, qp_base->mqp.qpn, ucontext);
+	else
 		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
-				tx_port_affinity, qp_base->mqp.qpn);
-	}
-
-	return tx_port_affinity;
+			    tx_affinity, qp_base->mqp.qpn);
+	return tx_affinity;
 }
 
 static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
@@ -3554,22 +3582,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
-		if ((ibqp->qp_type == IB_QPT_RC) ||
-		    (ibqp->qp_type == IB_QPT_UD &&
-		     !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
-		    (ibqp->qp_type == IB_QPT_UC) ||
-		    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
-		    (ibqp->qp_type == IB_QPT_XRC_INI) ||
-		    (ibqp->qp_type == IB_QPT_XRC_TGT)) {
-			if (dev->lag_active) {
-				u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
-				tx_affinity = get_tx_affinity(dev, pd, base, p,
-							      udata);
-				context->flags |= cpu_to_be32(tx_affinity << 24);
-			}
-		}
-	}
+	tx_affinity = get_tx_affinity(ibqp,
+				      cur_state == IB_QPS_RESET &&
+				      new_state == IB_QPS_INIT, udata);
+	context->flags |= cpu_to_be32(tx_affinity << 24);
 
 	if (is_sqp(ibqp->qp_type)) {
 		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
-- 
2.17.2



Thread overview: 25+ messages
2020-04-22  8:39 [PATCH V4 mlx5-next 00/15] Add support to get xmit slave Maor Gottlieb
2020-04-22  8:39 ` [PATCH V4 mlx5-next 01/15] net/core: Introduce netdev_get_xmit_slave Maor Gottlieb
2020-04-22 12:50   ` Jiri Pirko
2020-04-22 15:09   ` David Ahern
2020-04-22  8:39 ` [PATCH V4 mlx5-next 02/15] bonding: Export skip slave logic to function Maor Gottlieb
2020-04-22  8:39 ` [PATCH V4 mlx5-next 03/15] bonding: Rename slave_arr to usable_slaves Maor Gottlieb
2020-04-22  8:39 ` [PATCH V4 mlx5-next 04/15] bonding/alb: Add helper functions to get the xmit slave Maor Gottlieb
2020-04-22  8:39 ` [PATCH V4 mlx5-next 05/15] bonding: Add helper function to get the xmit slave based on hash Maor Gottlieb
2020-04-22  8:39 ` [PATCH V4 mlx5-next 06/15] bonding: Add helper function to get the xmit slave in rr mode Maor Gottlieb
2020-04-22  8:39 ` [PATCH V4 mlx5-next 07/15] bonding: Add function to get the xmit slave in active-backup mode Maor Gottlieb
2020-04-22  8:39 ` [PATCH V4 mlx5-next mlx5-next 08/15] bonding: Add array of all slaves Maor Gottlieb
2020-04-22  8:39 ` [PATCH V4 mlx5-next mlx5-next 09/15] bonding: Implement ndo_get_xmit_slave Maor Gottlieb
2020-04-22 12:53   ` Jiri Pirko
2020-04-22  8:39 ` [PATCH V4 mlx5-next 10/15] RDMA/core: Add LAG functionality Maor Gottlieb
2020-04-22 12:50   ` Jason Gunthorpe
2020-04-22 13:06     ` Maor Gottlieb
2020-04-22 15:12   ` David Ahern
2020-04-22  8:39 ` [PATCH V4 mlx5-next 11/15] RDMA/core: Get xmit slave for LAG Maor Gottlieb
2020-04-22 13:01   ` Jason Gunthorpe
2020-04-22  8:39 ` [PATCH V4 mlx5-next 12/15] net/mlx5: Change lag mutex lock to spin lock Maor Gottlieb
2020-04-22  8:39 ` [PATCH V4 mlx5-next 13/15] net/mlx5: Add support to get lag physical port Maor Gottlieb
2020-04-22  8:39 ` Maor Gottlieb [this message]
2020-04-22  8:39 ` [PATCH V4 mlx5-next 15/15] RDMA/mlx5: Set lag tx affinity according to slave Maor Gottlieb
2020-04-22 12:46 ` [PATCH V4 mlx5-next 00/15] Add support to get xmit slave Jiri Pirko
2020-04-22 12:56   ` Maor Gottlieb
