All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH rdma-next 0/6] mlx5 profile infrastructure to add and remove stages
@ 2017-12-24 12:40 Leon Romanovsky
       [not found] ` <20171224124015.31917-1-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Leon Romanovsky @ 2017-12-24 12:40 UTC (permalink / raw)
  To: Doug Ledford, Jason Gunthorpe
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Mark Bloch

Mark's patches continue the e-switch series introduced in the shared pull
request and refactor the mlx5_ib initialization and cleanup logic to work
in stages.

Such modular separation provides an easy way to construct arbitrary
initialization sequences and to skip unneeded stages, as will be required by
the RDMA/IB representors code.

The patches are available in the git repository at:
  git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma.git tags/rdma-next-2017-12-24

	Thanks
---------------------------------------

Mark Bloch (6):
  IB/mlx5: Create profile infrastructure to add and remove stages
  IB/mlx5: Move RoCE/ETH initialization to the corresponding stage
  IB/mlx5: Move ODP initialization to the corresponding stage
  IB/mlx5: Move hardware counters initialization to the corresponding
    stage
  IB/mlx5: Move loopback initialization to the corresponding stage
  IB/mlx5: Move locks initialization to the corresponding stage

 drivers/infiniband/hw/mlx5/main.c    | 370 ++++++++++++++++++++++++-----------
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  51 ++++-
 drivers/infiniband/hw/mlx5/mr.c      |   7 +-
 drivers/infiniband/hw/mlx5/odp.c     |  38 ++--
 4 files changed, 324 insertions(+), 142 deletions(-)

-- 
2.15.1

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [PATCH rdma-next 1/6] IB/mlx5: Create profile infrastructure to add and remove stages
       [not found] ` <20171224124015.31917-1-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
@ 2017-12-24 12:40   ` Leon Romanovsky
       [not found]     ` <20171224124015.31917-2-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
  2017-12-24 12:40   ` [PATCH rdma-next 2/6] IB/mlx5: Move RoCE/ETH initialization to the corresponding stage Leon Romanovsky
                     ` (4 subsequent siblings)
  5 siblings, 1 reply; 17+ messages in thread
From: Leon Romanovsky @ 2017-12-24 12:40 UTC (permalink / raw)
  To: Doug Ledford, Jason Gunthorpe
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Mark Bloch

From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>

Today we have a single function which is used when we add an IB interface;
break this function into multiple functions.

Create stages and a generic mechanism to execute each stage.
This is in preparation for RDMA/IB representors which might not need
all stages or will do things differently in some of the stages.

This patch doesn't change any functionality.

Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
 drivers/infiniband/hw/mlx5/main.c    | 344 +++++++++++++++++++++++++----------
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  31 ++++
 2 files changed, 282 insertions(+), 93 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 543d0a4c8bf3..16ea7778df55 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3949,30 +3949,21 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
 	return mlx5_get_vector_affinity(dev->mdev, comp_vector);
 }
 
-static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_ib_dev *dev;
-	enum rdma_link_layer ll;
-	int port_type_cap;
+	kfree(dev->port);
+}
+
+static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_core_dev *mdev = dev->mdev;
 	const char *name;
 	int err;
-	int i;
-
-	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
-	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
-
-	printk_once(KERN_INFO "%s", mlx5_version);
-
-	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
-	if (!dev)
-		return NULL;
-
-	dev->mdev = mdev;
 
 	dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
 			    GFP_KERNEL);
 	if (!dev->port)
-		goto err_dealloc;
+		return -ENOMEM;
 
 	rwlock_init(&dev->roce.netdev_lock);
 	err = get_port_caps(dev);
@@ -3997,6 +3988,24 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 		dev->mdev->priv.eq_table.num_comp_vectors;
 	dev->ib_dev.dev.parent		= &mdev->pdev->dev;
 
+	return 0;
+
+err_free_port:
+	kfree(dev->port);
+
+	return -ENOMEM;
+}
+
+static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_core_dev *mdev = dev->mdev;
+	enum rdma_link_layer ll;
+	int port_type_cap;
+	int err;
+
+	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
 	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
 	dev->ib_dev.uverbs_cmd_mask	=
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
@@ -4140,139 +4149,288 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	}
 	err = init_node_data(dev);
 	if (err)
-		goto err_free_port;
+		return err;
 
 	mutex_init(&dev->flow_db.lock);
 	mutex_init(&dev->cap_mask_mutex);
 	INIT_LIST_HEAD(&dev->qp_list);
 	spin_lock_init(&dev->reset_flow_resource_lock);
 
+	return 0;
+}
+
+static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_core_dev *mdev = dev->mdev;
+	enum rdma_link_layer ll;
+	int port_type_cap;
+	int err;
+
+	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
 	if (ll == IB_LINK_LAYER_ETHERNET) {
 		err = mlx5_enable_eth(dev);
 		if (err)
-			goto err_free_port;
+			return err;
 		dev->roce.last_port_state = IB_PORT_DOWN;
 	}
 
-	err = create_dev_resources(&dev->devr);
-	if (err)
-		goto err_disable_eth;
+	return 0;
+}
 
-	err = mlx5_ib_odp_init_one(dev);
-	if (err)
-		goto err_rsrc;
+static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_core_dev *mdev = dev->mdev;
+	enum rdma_link_layer ll;
+	int port_type_cap;
 
-	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
-		err = mlx5_ib_alloc_counters(dev);
-		if (err)
-			goto err_odp;
+	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+	if (ll == IB_LINK_LAYER_ETHERNET) {
+		mlx5_disable_eth(dev);
+		mlx5_remove_netdev_notifier(dev);
 	}
+}
 
-	err = mlx5_ib_init_cong_debugfs(dev);
-	if (err)
-		goto err_cnt;
+static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
+{
+	return create_dev_resources(&dev->devr);
+}
+
+static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
+{
+	destroy_dev_resources(&dev->devr);
+}
+
+static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
+{
+	return mlx5_ib_odp_init_one(dev);
+}
 
+static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
+{
+	mlx5_ib_odp_remove_one(dev);
+}
+
+static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
+{
+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+		return  mlx5_ib_alloc_counters(dev);
+
+	return 0;
+}
+
+static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
+{
+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+		mlx5_ib_dealloc_counters(dev);
+}
+
+static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
+{
+	return mlx5_ib_init_cong_debugfs(dev);
+}
+
+static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+	mlx5_ib_cleanup_cong_debugfs(dev);
+}
+
+static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
+{
 	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
 	if (!dev->mdev->priv.uar)
-		goto err_cong;
+		return -ENOMEM;
+	return 0;
+}
+
+static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
+{
+	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+}
+
+static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
+{
+	int err;
 
 	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
 	if (err)
-		goto err_uar_page;
+		return err;
 
 	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
 	if (err)
-		goto err_bfreg;
+		mlx5_free_bfreg(dev->mdev, &dev->bfreg);
 
-	err = ib_register_device(&dev->ib_dev, NULL);
-	if (err)
-		goto err_fp_bfreg;
+	return err;
+}
 
-	err = create_umr_res(dev);
-	if (err)
-		goto err_dev;
+static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
+{
+	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+}
 
+static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
+{
+	return ib_register_device(&dev->ib_dev, NULL);
+}
+
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+{
+	ib_unregister_device(&dev->ib_dev);
+}
+
+static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+{
+	return create_umr_res(dev);
+}
+
+static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+{
+	destroy_umrc_res(dev);
+}
+
+static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
+{
 	init_delay_drop(dev);
 
+	return 0;
+}
+
+static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
+{
+	cancel_delay_drop(dev);
+}
+
+static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
+{
+	int err;
+	int i;
+
 	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
 		err = device_create_file(&dev->ib_dev.dev,
 					 mlx5_class_attributes[i]);
 		if (err)
-			goto err_delay_drop;
+			return err;
 	}
 
-	if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
-	    MLX5_CAP_GEN(mdev, disable_local_lb))
+	return 0;
+}
+
+static int mlx5_ib_stage_loopback_init(struct mlx5_ib_dev *dev)
+{
+	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+	    MLX5_CAP_GEN(dev->mdev, disable_local_lb))
 		mutex_init(&dev->lb_mutex);
 
-	dev->ib_active = true;
+	return 0;
+}
 
-	return dev;
+static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+			     struct mlx5_ib_profile *profile,
+			     int stage)
+{
+	/* Number of stages to cleanup */
+	while (stage) {
+		stage--;
+		if (profile->stage[stage].cleanup)
+			profile->stage[stage].cleanup(dev);
+	}
 
-err_delay_drop:
-	cancel_delay_drop(dev);
-	destroy_umrc_res(dev);
+	ib_dealloc_device((struct ib_device *)dev);
+}
 
-err_dev:
-	ib_unregister_device(&dev->ib_dev);
+static void *__mlx5_ib_add(struct mlx5_core_dev *mdev,
+			   struct mlx5_ib_profile *profile)
+{
+	struct mlx5_ib_dev *dev;
+	int err;
+	int i;
 
-err_fp_bfreg:
-	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+	printk_once(KERN_INFO "%s", mlx5_version);
 
-err_bfreg:
-	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
+	if (!dev)
+		return NULL;
 
-err_uar_page:
-	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+	dev->mdev = mdev;
 
-err_cong:
-	mlx5_ib_cleanup_cong_debugfs(dev);
-err_cnt:
-	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
-		mlx5_ib_dealloc_counters(dev);
+	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
+		if (profile->stage[i].init) {
+			err = profile->stage[i].init(dev);
+			if (err)
+				goto err_out;
+		}
+	}
 
-err_odp:
-	mlx5_ib_odp_remove_one(dev);
+	dev->profile = profile;
+	dev->ib_active = true;
 
-err_rsrc:
-	destroy_dev_resources(&dev->devr);
+	return dev;
 
-err_disable_eth:
-	if (ll == IB_LINK_LAYER_ETHERNET) {
-		mlx5_disable_eth(dev);
-		mlx5_remove_netdev_notifier(dev);
-	}
+err_out:
+	__mlx5_ib_remove(dev, profile, i);
 
-err_free_port:
-	kfree(dev->port);
+	return NULL;
+}
 
-err_dealloc:
-	ib_dealloc_device((struct ib_device *)dev);
+static struct mlx5_ib_profile pf_profile = {
+	STAGE_CREATE(MLX5_IB_STAGE_INIT,
+		     mlx5_ib_stage_init_init,
+		     mlx5_ib_stage_init_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+		     mlx5_ib_stage_caps_init,
+		     NULL),
+	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+		     mlx5_ib_stage_roce_init,
+		     mlx5_ib_stage_roce_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+		     mlx5_ib_stage_dev_res_init,
+		     mlx5_ib_stage_dev_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_ODP,
+		     mlx5_ib_stage_odp_init,
+		     mlx5_ib_stage_odp_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+		     mlx5_ib_stage_counters_init,
+		     mlx5_ib_stage_counters_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+		     mlx5_ib_stage_cong_debugfs_init,
+		     mlx5_ib_stage_cong_debugfs_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_UAR,
+		     mlx5_ib_stage_uar_init,
+		     mlx5_ib_stage_uar_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+		     mlx5_ib_stage_bfrag_init,
+		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+		     mlx5_ib_stage_ib_reg_init,
+		     mlx5_ib_stage_ib_reg_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
+		     mlx5_ib_stage_umr_res_init,
+		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
+		     mlx5_ib_stage_delay_drop_init,
+		     mlx5_ib_stage_delay_drop_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
+		     mlx5_ib_stage_class_attr_init,
+		     NULL),
+	STAGE_CREATE(MLX5_IB_STAGE_LOOPBACK,
+		     mlx5_ib_stage_loopback_init,
+		     NULL),
+};
 
-	return NULL;
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+{
+	return __mlx5_ib_add(mdev, &pf_profile);
 }
 
 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 {
 	struct mlx5_ib_dev *dev = context;
-	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
 
-	cancel_delay_drop(dev);
-	mlx5_remove_netdev_notifier(dev);
-	ib_unregister_device(&dev->ib_dev);
-	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
-	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
-	mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
-	mlx5_ib_cleanup_cong_debugfs(dev);
-	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
-		mlx5_ib_dealloc_counters(dev);
-	destroy_umrc_res(dev);
-	mlx5_ib_odp_remove_one(dev);
-	destroy_dev_resources(&dev->devr);
-	if (ll == IB_LINK_LAYER_ETHERNET)
-		mlx5_disable_eth(dev);
-	kfree(dev->port);
-	ib_dealloc_device(&dev->ib_dev);
+	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
 }
 
 static struct mlx5_interface mlx5_ib_interface = {
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 6dd8cac78de2..8f97213e5b4c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -705,6 +705,36 @@ struct mlx5_ib_delay_drop {
 	struct mlx5_ib_dbg_delay_drop *dbg;
 };
 
+enum mlx5_ib_stages {
+	MLX5_IB_STAGE_INIT,
+	MLX5_IB_STAGE_CAPS,
+	MLX5_IB_STAGE_ROCE,
+	MLX5_IB_STAGE_DEVICE_RESOURCES,
+	MLX5_IB_STAGE_ODP,
+	MLX5_IB_STAGE_COUNTERS,
+	MLX5_IB_STAGE_CONG_DEBUGFS,
+	MLX5_IB_STAGE_UAR,
+	MLX5_IB_STAGE_BFREG,
+	MLX5_IB_STAGE_IB_REG,
+	MLX5_IB_STAGE_UMR_RESOURCES,
+	MLX5_IB_STAGE_DELAY_DROP,
+	MLX5_IB_STAGE_CLASS_ATTR,
+	MLX5_IB_STAGE_LOOPBACK,
+	MLX5_IB_STAGE_MAX,
+};
+
+struct mlx5_ib_stage {
+	int (*init)(struct mlx5_ib_dev *dev);
+	void (*cleanup)(struct mlx5_ib_dev *dev);
+};
+
+#define STAGE_CREATE(_stage, _init, _cleanup) \
+	.stage[_stage] = {.init = _init, .cleanup = _cleanup}
+
+struct mlx5_ib_profile {
+	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
+};
+
 struct mlx5_ib_dev {
 	struct ib_device		ib_dev;
 	struct mlx5_core_dev		*mdev;
@@ -743,6 +773,7 @@ struct mlx5_ib_dev {
 	struct mlx5_sq_bfreg	fp_bfreg;
 	struct mlx5_ib_delay_drop	delay_drop;
 	struct mlx5_ib_dbg_cc_params	*dbg_cc_params;
+	struct mlx5_ib_profile	*profile;
 
 	/* protect the user_td */
 	struct mutex		lb_mutex;
-- 
2.15.1

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH rdma-next 2/6] IB/mlx5: Move RoCE/ETH initialization to the corresponding stage
       [not found] ` <20171224124015.31917-1-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
  2017-12-24 12:40   ` [PATCH rdma-next 1/6] IB/mlx5: Create " Leon Romanovsky
@ 2017-12-24 12:40   ` Leon Romanovsky
  2017-12-24 12:40   ` [PATCH rdma-next 3/6] IB/mlx5: Move ODP " Leon Romanovsky
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 17+ messages in thread
From: Leon Romanovsky @ 2017-12-24 12:40 UTC (permalink / raw)
  To: Doug Ledford, Jason Gunthorpe
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Mark Bloch

From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>

Now that we have a stage just for RoCE/ETH, move all relevant
initialization logic into one place.

Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
 drivers/infiniband/hw/mlx5/main.c | 33 ++++++++++++---------------------
 1 file changed, 12 insertions(+), 21 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 16ea7778df55..ac72f62a9342 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3999,13 +3999,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
-	enum rdma_link_layer ll;
-	int port_type_cap;
 	int err;
 
-	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
-	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
-
 	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
 	dev->ib_dev.uverbs_cmd_mask	=
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
@@ -4044,8 +4039,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	dev->ib_dev.query_device	= mlx5_ib_query_device;
 	dev->ib_dev.query_port		= mlx5_ib_query_port;
 	dev->ib_dev.get_link_layer	= mlx5_ib_port_link_layer;
-	if (ll == IB_LINK_LAYER_ETHERNET)
-		dev->ib_dev.get_netdev	= mlx5_ib_get_netdev;
 	dev->ib_dev.query_gid		= mlx5_ib_query_gid;
 	dev->ib_dev.add_gid		= mlx5_ib_add_gid;
 	dev->ib_dev.del_gid		= mlx5_ib_del_gid;
@@ -4133,20 +4126,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
 
-	if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
-	    IB_LINK_LAYER_ETHERNET) {
-		dev->ib_dev.create_wq	 = mlx5_ib_create_wq;
-		dev->ib_dev.modify_wq	 = mlx5_ib_modify_wq;
-		dev->ib_dev.destroy_wq	 = mlx5_ib_destroy_wq;
-		dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
-		dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
-		dev->ib_dev.uverbs_ex_cmd_mask |=
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
-	}
 	err = init_node_data(dev);
 	if (err)
 		return err;
@@ -4170,6 +4149,18 @@ static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
 
 	if (ll == IB_LINK_LAYER_ETHERNET) {
+		dev->ib_dev.get_netdev	= mlx5_ib_get_netdev;
+		dev->ib_dev.create_wq	 = mlx5_ib_create_wq;
+		dev->ib_dev.modify_wq	 = mlx5_ib_modify_wq;
+		dev->ib_dev.destroy_wq	 = mlx5_ib_destroy_wq;
+		dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
+		dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
+		dev->ib_dev.uverbs_ex_cmd_mask |=
+			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
+			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
+			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
+			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
+			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
 		err = mlx5_enable_eth(dev);
 		if (err)
 			return err;
-- 
2.15.1

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH rdma-next 3/6] IB/mlx5: Move ODP initialization to the corresponding stage
       [not found] ` <20171224124015.31917-1-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
  2017-12-24 12:40   ` [PATCH rdma-next 1/6] IB/mlx5: Create " Leon Romanovsky
  2017-12-24 12:40   ` [PATCH rdma-next 2/6] IB/mlx5: Move RoCE/ETH initialization to the corresponding stage Leon Romanovsky
@ 2017-12-24 12:40   ` Leon Romanovsky
  2017-12-24 12:40   ` [PATCH rdma-next 4/6] IB/mlx5: Move hardware counters " Leon Romanovsky
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 17+ messages in thread
From: Leon Romanovsky @ 2017-12-24 12:40 UTC (permalink / raw)
  To: Doug Ledford, Jason Gunthorpe
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Mark Bloch

From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>

Now that we have a stage just for ODP, move all relevant
initialization logic into one place.

In addition, ODP structs might not always be initialized: the SRCU which is
used to synchronize between page faults and mkey deletion might not be
initialized. Wrap the call with a callback which is only set when the ODP
stage is used.

Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
 drivers/infiniband/hw/mlx5/main.c    |  6 +++---
 drivers/infiniband/hw/mlx5/mlx5_ib.h | 21 ++++++++++++--------
 drivers/infiniband/hw/mlx5/mr.c      |  7 ++++---
 drivers/infiniband/hw/mlx5/odp.c     | 38 +++++++++++++++++++++---------------
 4 files changed, 42 insertions(+), 30 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ac72f62a9342..6fbd72b6d624 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -760,7 +760,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	if (MLX5_CAP_GEN(mdev, pg))
 		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
-	props->odp_caps = dev->odp_caps;
+	props->odp_caps = dev->odp.caps;
 #endif
 
 	if (MLX5_CAP_GEN(mdev, cd))
@@ -4095,8 +4095,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 
 	dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
 
-	mlx5_ib_internal_fill_odp_caps(dev);
-
 	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
 
 	if (MLX5_CAP_GEN(mdev, imaicl)) {
@@ -4197,6 +4195,8 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
 
 static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
 {
+	mlx5_ib_internal_fill_odp_caps(dev);
+
 	return mlx5_ib_odp_init_one(dev);
 }
 
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 8f97213e5b4c..e0d9c03f4432 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -735,6 +735,18 @@ struct mlx5_ib_profile {
 	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
 };
 
+struct mlx5_ib_odp {
+	struct ib_odp_caps	caps;
+	u64			max_size;
+	/*
+	 * Sleepable RCU that prevents destruction of MRs while they are still
+	 * being used by a page fault handler.
+	 */
+	struct srcu_struct      mr_srcu;
+	u32			null_mkey;
+	void			(*sync)(struct mlx5_ib_dev *dev);
+};
+
 struct mlx5_ib_dev {
 	struct ib_device		ib_dev;
 	struct mlx5_core_dev		*mdev;
@@ -754,14 +766,7 @@ struct mlx5_ib_dev {
 	struct mutex			slow_path_mutex;
 	int				fill_delay;
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	struct ib_odp_caps	odp_caps;
-	u64			odp_max_size;
-	/*
-	 * Sleepable RCU that prevents destruction of MRs while they are still
-	 * being used by a page fault handler.
-	 */
-	struct srcu_struct      mr_srcu;
-	u32			null_mkey;
+	struct mlx5_ib_odp		odp;
 #endif
 	struct mlx5_ib_flow_db	flow_db;
 	/* protect resources needed as part of reset flow */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ad37d8441fa2..059771bfb415 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -58,7 +58,8 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	/* Wait until all page fault handlers using the mr complete. */
-	synchronize_srcu(&dev->mr_srcu);
+	if (dev->odp.sync)
+		dev->odp.sync(dev);
 #endif
 
 	return err;
@@ -1215,7 +1216,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	if (!start && length == U64_MAX) {
 		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
-		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+		    !(dev->odp.caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
 			return ERR_PTR(-EINVAL);
 
 		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
@@ -1521,7 +1522,7 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		/* Prevent new page faults from succeeding */
 		mr->live = 0;
 		/* Wait for all running page-fault handlers to finish. */
-		synchronize_srcu(&dev->mr_srcu);
+		dev->odp.sync(dev);
 		/* Destroy all page mappings */
 		if (umem->odp_data->page_list)
 			mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index e2197bdda89c..117f87d06919 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -125,7 +125,7 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
 	if (flags & MLX5_IB_UPD_XLT_ZAP) {
 		for (i = 0; i < nentries; i++, pklm++) {
 			pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
-			pklm->key = cpu_to_be32(dev->null_mkey);
+			pklm->key = cpu_to_be32(dev->odp.null_mkey);
 			pklm->va = 0;
 		}
 		return;
@@ -143,7 +143,7 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
 			pklm->key = cpu_to_be32(mtt->ibmr.lkey);
 			odp = odp_next(odp);
 		} else {
-			pklm->key = cpu_to_be32(dev->null_mkey);
+			pklm->key = cpu_to_be32(dev->odp.null_mkey);
 		}
 		mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
 			    i, va, be32_to_cpu(pklm->key));
@@ -157,7 +157,7 @@ static void mr_leaf_free_action(struct work_struct *work)
 	struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
 
 	mr->parent = NULL;
-	synchronize_srcu(&mr->dev->mr_srcu);
+	mr->dev->odp.sync(mr->dev);
 
 	ib_umem_release(odp->umem);
 	if (imr->live)
@@ -249,7 +249,7 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
 
 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
-	struct ib_odp_caps *caps = &dev->odp_caps;
+	struct ib_odp_caps *caps = &dev->odp.caps;
 
 	memset(caps, 0, sizeof(*caps));
 
@@ -259,9 +259,9 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 	caps->general_caps = IB_ODP_SUPPORT;
 
 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
-		dev->odp_max_size = U64_MAX;
+		dev->odp.max_size = U64_MAX;
 	else
-		dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
+		dev->odp.max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
 
 	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
 		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
@@ -641,7 +641,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 	u32 *out = NULL;
 	size_t offset;
 
-	srcu_key = srcu_read_lock(&dev->mr_srcu);
+	srcu_key = srcu_read_lock(&dev->odp.mr_srcu);
 
 	io_virt += *bytes_committed;
 	bcnt -= *bytes_committed;
@@ -754,7 +754,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 	}
 	kfree(out);
 
-	srcu_read_unlock(&dev->mr_srcu, srcu_key);
+	srcu_read_unlock(&dev->odp.mr_srcu, srcu_key);
 	*bytes_committed = 0;
 	return ret ? ret : npages;
 }
@@ -919,10 +919,10 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 
 	switch (qp->ibqp.qp_type) {
 	case IB_QPT_RC:
-		transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
+		transport_caps = dev->odp.caps.per_transport_caps.rc_odp_caps;
 		break;
 	case IB_QPT_UD:
-		transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
+		transport_caps = dev->odp.caps.per_transport_caps.ud_odp_caps;
 		break;
 	default:
 		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
@@ -989,7 +989,7 @@ static int mlx5_ib_mr_responder_pfault_handler(
 
 	switch (qp->ibqp.qp_type) {
 	case IB_QPT_RC:
-		if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+		if (!(dev->odp.caps.per_transport_caps.rc_odp_caps &
 		      IB_ODP_SUPPORT_RECV))
 			goto invalid_transport_or_opcode;
 		break;
@@ -1179,7 +1179,7 @@ void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
 
 void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
 {
-	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+	if (!(ent->dev->odp.caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
 		return;
 
 	switch (ent->order - 2) {
@@ -1203,28 +1203,34 @@ void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
 	}
 }
 
+static void mlx5_ib_odp_sync(struct mlx5_ib_dev *dev)
+{
+	synchronize_srcu(&dev->odp.mr_srcu);
+}
+
 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
 {
 	int ret;
 
-	ret = init_srcu_struct(&dev->mr_srcu);
+	ret = init_srcu_struct(&dev->odp.mr_srcu);
 	if (ret)
 		return ret;
 
-	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
-		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
+	if (dev->odp.caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
+		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->odp.null_mkey);
 		if (ret) {
 			mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
 			return ret;
 		}
 	}
+	dev->odp.sync = mlx5_ib_odp_sync;
 
 	return 0;
 }
 
 void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *dev)
 {
-	cleanup_srcu_struct(&dev->mr_srcu);
+	cleanup_srcu_struct(&dev->odp.mr_srcu);
 }
 
 int mlx5_ib_odp_init(void)
-- 
2.15.1

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH rdma-next 4/6] IB/mlx5: Move hardware counters initialization to the corresponding stage
       [not found] ` <20171224124015.31917-1-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
                     ` (2 preceding siblings ...)
  2017-12-24 12:40   ` [PATCH rdma-next 3/6] IB/mlx5: Move ODP " Leon Romanovsky
@ 2017-12-24 12:40   ` Leon Romanovsky
  2017-12-24 12:40   ` [PATCH rdma-next 5/6] IB/mlx5: Move loopback " Leon Romanovsky
  2017-12-24 12:40   ` [PATCH rdma-next 6/6] IB/mlx5: Move locks " Leon Romanovsky
  5 siblings, 0 replies; 17+ messages in thread
From: Leon Romanovsky @ 2017-12-24 12:40 UTC (permalink / raw)
  To: Doug Ledford, Jason Gunthorpe
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Mark Bloch

From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>

Now that we have a stage just for hardware counters, move all relevant
initialization logic into one place.

Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
 drivers/infiniband/hw/mlx5/main.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6fbd72b6d624..c68dd1041aaa 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4105,11 +4105,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
 	}
 
-	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
-		dev->ib_dev.get_hw_stats	= mlx5_ib_get_hw_stats;
-		dev->ib_dev.alloc_hw_stats	= mlx5_ib_alloc_hw_stats;
-	}
-
 	if (MLX5_CAP_GEN(mdev, xrc)) {
 		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
 		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
@@ -4207,8 +4202,12 @@ static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
 
 static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
 {
-	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
-		return  mlx5_ib_alloc_counters(dev);
+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
+		dev->ib_dev.get_hw_stats	= mlx5_ib_get_hw_stats;
+		dev->ib_dev.alloc_hw_stats	= mlx5_ib_alloc_hw_stats;
+
+		return mlx5_ib_alloc_counters(dev);
+	}
 
 	return 0;
 }
-- 
2.15.1

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH rdma-next 5/6] IB/mlx5: Move loopback initialization to the corresponding stage
       [not found] ` <20171224124015.31917-1-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
                     ` (3 preceding siblings ...)
  2017-12-24 12:40   ` [PATCH rdma-next 4/6] IB/mlx5: Move hardware counters " Leon Romanovsky
@ 2017-12-24 12:40   ` Leon Romanovsky
  2017-12-24 12:40   ` [PATCH rdma-next 6/6] IB/mlx5: Move locks " Leon Romanovsky
  5 siblings, 0 replies; 17+ messages in thread
From: Leon Romanovsky @ 2017-12-24 12:40 UTC (permalink / raw)
  To: Doug Ledford, Jason Gunthorpe
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Mark Bloch

From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>

The loopback stage only initializes a lock, move it to be in
the CAPS initialization phase and get rid of the loopback step completely.

Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
 drivers/infiniband/hw/mlx5/main.c    | 16 ++++------------
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  1 -
 2 files changed, 4 insertions(+), 13 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c68dd1041aaa..d7ccf8d5b2d2 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4128,6 +4128,10 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	INIT_LIST_HEAD(&dev->qp_list);
 	spin_lock_init(&dev->reset_flow_resource_lock);
 
+	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+	    MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+		mutex_init(&dev->lb_mutex);
+
 	return 0;
 }
 
@@ -4309,15 +4313,6 @@ static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
 	return 0;
 }
 
-static int mlx5_ib_stage_loopback_init(struct mlx5_ib_dev *dev)
-{
-	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
-	    MLX5_CAP_GEN(dev->mdev, disable_local_lb))
-		mutex_init(&dev->lb_mutex);
-
-	return 0;
-}
-
 static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 			     struct mlx5_ib_profile *profile,
 			     int stage)
@@ -4406,9 +4401,6 @@ static struct mlx5_ib_profile pf_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
 		     mlx5_ib_stage_class_attr_init,
 		     NULL),
-	STAGE_CREATE(MLX5_IB_STAGE_LOOPBACK,
-		     mlx5_ib_stage_loopback_init,
-		     NULL),
 };
 
 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e0d9c03f4432..8f762ac4a659 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -719,7 +719,6 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_UMR_RESOURCES,
 	MLX5_IB_STAGE_DELAY_DROP,
 	MLX5_IB_STAGE_CLASS_ATTR,
-	MLX5_IB_STAGE_LOOPBACK,
 	MLX5_IB_STAGE_MAX,
 };
 
-- 
2.15.1

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH rdma-next 6/6] IB/mlx5: Move locks initialization to the corresponding stage
       [not found] ` <20171224124015.31917-1-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
                     ` (4 preceding siblings ...)
  2017-12-24 12:40   ` [PATCH rdma-next 5/6] IB/mlx5: Move loopback " Leon Romanovsky
@ 2017-12-24 12:40   ` Leon Romanovsky
       [not found]     ` <20171224124015.31917-7-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
  5 siblings, 1 reply; 17+ messages in thread
From: Leon Romanovsky @ 2017-12-24 12:40 UTC (permalink / raw)
  To: Doug Ledford, Jason Gunthorpe
  Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, Mark Bloch

From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>

Unconditional locks/list initialization should be done in the INIT stage.
Remove those from the CAPS stage and move them to the proper stage.

Signed-off-by: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
Signed-off-by: Leon Romanovsky <leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
---
 drivers/infiniband/hw/mlx5/main.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d7ccf8d5b2d2..6775e440e23f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3988,6 +3988,11 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 		dev->mdev->priv.eq_table.num_comp_vectors;
 	dev->ib_dev.dev.parent		= &mdev->pdev->dev;
 
+	mutex_init(&dev->flow_db.lock);
+	mutex_init(&dev->cap_mask_mutex);
+	INIT_LIST_HEAD(&dev->qp_list);
+	spin_lock_init(&dev->reset_flow_resource_lock);
+
 	return 0;
 
 err_free_port:
@@ -4123,11 +4128,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	if (err)
 		return err;
 
-	mutex_init(&dev->flow_db.lock);
-	mutex_init(&dev->cap_mask_mutex);
-	INIT_LIST_HEAD(&dev->qp_list);
-	spin_lock_init(&dev->reset_flow_resource_lock);
-
 	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
 	    MLX5_CAP_GEN(dev->mdev, disable_local_lb))
 		mutex_init(&dev->lb_mutex);
-- 
2.15.1

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [PATCH rdma-next 1/6] IB/mlx5: Create profile infrastructure to add and remove stages
       [not found]     ` <20171224124015.31917-2-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
@ 2017-12-28  5:13       ` Jason Gunthorpe
  0 siblings, 0 replies; 17+ messages in thread
From: Jason Gunthorpe @ 2017-12-28  5:13 UTC (permalink / raw)
  To: Leon Romanovsky
  Cc: Doug Ledford, linux-rdma-u79uwXL29TY76Z2rM5mHXA, Mark Bloch

On Sun, Dec 24, 2017 at 02:40:10PM +0200, Leon Romanovsky wrote:

> +static struct mlx5_ib_profile pf_profile = {

static const please

Jason
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH rdma-next 6/6] IB/mlx5: Move locks initialization to the corresponding stage
       [not found]     ` <20171224124015.31917-7-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
@ 2017-12-28  5:18       ` Jason Gunthorpe
       [not found]         ` <20171228051806.GP25436-uk2M96/98Pc@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Jason Gunthorpe @ 2017-12-28  5:18 UTC (permalink / raw)
  To: Leon Romanovsky
  Cc: Doug Ledford, linux-rdma-u79uwXL29TY76Z2rM5mHXA, Mark Bloch

On Sun, Dec 24, 2017 at 02:40:15PM +0200, Leon Romanovsky wrote:
> From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
> 
> Unconditional locks/list initialization should be done in the INIT stage.
> Remove those from the CAPS stage and move them to the proper stage.

So why not always initialize the srcu instead of using that wonky
callback function?

Jason
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH rdma-next 6/6] IB/mlx5: Move locks initialization to the corresponding stage
       [not found]         ` <20171228051806.GP25436-uk2M96/98Pc@public.gmane.org>
@ 2017-12-28  5:32           ` Leon Romanovsky
       [not found]             ` <20171228053259.GP3494-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Leon Romanovsky @ 2017-12-28  5:32 UTC (permalink / raw)
  To: Jason Gunthorpe
  Cc: Doug Ledford, linux-rdma-u79uwXL29TY76Z2rM5mHXA, Mark Bloch

[-- Attachment #1: Type: text/plain, Size: 575 bytes --]

On Wed, Dec 27, 2017 at 10:18:06PM -0700, Jason Gunthorpe wrote:
> On Sun, Dec 24, 2017 at 02:40:15PM +0200, Leon Romanovsky wrote:
> > From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
> >
> > Unconditional locks/list initialization should be done in the INIT stage.
> > Remove those from the CAPS stage and move them to the proper stage.
>
> So why not always initialize the srcu instead of using that wonky
> callback function?

Maybe, but now, it is out of scope for this series, which more or less
moves code from one place to another.

Thanks

>
> Jason

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 833 bytes --]

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH rdma-next 6/6] IB/mlx5: Move locks initialization to the corresponding stage
       [not found]             ` <20171228053259.GP3494-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
@ 2017-12-28  5:35               ` Jason Gunthorpe
       [not found]                 ` <20171228053523.GR25436-uk2M96/98Pc@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Jason Gunthorpe @ 2017-12-28  5:35 UTC (permalink / raw)
  To: Leon Romanovsky
  Cc: Doug Ledford, linux-rdma-u79uwXL29TY76Z2rM5mHXA, Mark Bloch

On Thu, Dec 28, 2017 at 07:32:59AM +0200, Leon Romanovsky wrote:
> On Wed, Dec 27, 2017 at 10:18:06PM -0700, Jason Gunthorpe wrote:
> > On Sun, Dec 24, 2017 at 02:40:15PM +0200, Leon Romanovsky wrote:
> > > From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
> > >
> > > Unconditional locks/list initialization should be done in the INIT stage.
> > > Remove those from the CAPS stage and move them to the proper stage.
> >
> > So why not always initialize the srcu instead of using that wonky
> > callback function?
> 
> Maybe, but now, it is out of scope for this series, which more or less
> moves code from one place to another.

So the possibility of an uninited SRCU existed before and this is
fixing a bug? I had the impression this series was introducing that
possibility...

Jason
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH rdma-next 6/6] IB/mlx5: Move locks initialization to the corresponding stage
       [not found]                 ` <20171228053523.GR25436-uk2M96/98Pc@public.gmane.org>
@ 2017-12-28  7:51                   ` Mark Bloch
       [not found]                     ` <3ad02325-5244-31c9-eb6d-139a17b064b6-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Mark Bloch @ 2017-12-28  7:51 UTC (permalink / raw)
  To: Jason Gunthorpe, Leon Romanovsky
  Cc: Doug Ledford, linux-rdma-u79uwXL29TY76Z2rM5mHXA



On 28/12/2017 07:35, Jason Gunthorpe wrote:
> On Thu, Dec 28, 2017 at 07:32:59AM +0200, Leon Romanovsky wrote:
>> On Wed, Dec 27, 2017 at 10:18:06PM -0700, Jason Gunthorpe wrote:
>>> On Sun, Dec 24, 2017 at 02:40:15PM +0200, Leon Romanovsky wrote:
>>>> From: Mark Bloch <markb-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
>>>>
>>>> Unconditional locks/list initialization should be done in the INIT stage.
>>>> Remove those from the CAPS stage and move them to the proper stage.
>>>
>>> So why not always initialize the srcu instead of using that wonky
>>> callback function?
>>
>> Maybe, but now, it is out of scope for this series, which more or less
>> moves code from one place to another.
> 
> So the possibility of an uninited SRCU existed before and this is
> fixing a bug? I had the impression this series was introducing that
> possibility...
> 

Yes, let me explain.
This entire series aims to allow to turn on/off certain features and to
group the init/cleanup of those features together.
If you take that into account, the srcu (which is needed only for ODP)
needs to be done in the ODP stage, but the issue is that not only ODP code
uses the srcu, destroy_mkey() will sync on it.

So I can move the srcu initialization to INIT, but this way we lose
context when looking at the ODP stage.

> Jason
> 

Mark
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH rdma-next 6/6] IB/mlx5: Move locks initialization to the corresponding stage
       [not found]                     ` <3ad02325-5244-31c9-eb6d-139a17b064b6-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
@ 2017-12-28 16:03                       ` Jason Gunthorpe
       [not found]                         ` <20171228160310.GW25436-uk2M96/98Pc@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Jason Gunthorpe @ 2017-12-28 16:03 UTC (permalink / raw)
  To: Mark Bloch
  Cc: Leon Romanovsky, Doug Ledford, linux-rdma-u79uwXL29TY76Z2rM5mHXA

On Thu, Dec 28, 2017 at 09:51:24AM +0200, Mark Bloch wrote:

> Yes, let me explain.
> This entire series aims to allow to turn on/off certain features and to
> group the init/cleanup of those features together.
> If you take that into account, the srcu (which is needed only for ODP)
> needs to be done in the ODP stage, but the issue is that not only ODP code
> uses the srcu, destroy_mkey() will sync on it.

Okay, right, so this series creates the united srcu possibility. The
commit messages are not totally clear..

> So I can move the srcu initialization to INIT, but this way we lose
> context when looking at the ODP stage.

If something is being used by multiple stages then it really seems
like it should be common.

Introducing a function call back just to avoid initing a srcu in
common code for the sole reason of 'keeping like together' seems
like bad taste to me..

Jason
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH rdma-next 6/6] IB/mlx5: Move locks initialization to the corresponding stage
       [not found]                         ` <20171228160310.GW25436-uk2M96/98Pc@public.gmane.org>
@ 2017-12-28 17:10                           ` Mark Bloch
       [not found]                             ` <9f8a7042-7c6a-9929-fa10-0edf5c6e366e-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Mark Bloch @ 2017-12-28 17:10 UTC (permalink / raw)
  To: Jason Gunthorpe
  Cc: Leon Romanovsky, Doug Ledford, linux-rdma-u79uwXL29TY76Z2rM5mHXA



On 28/12/2017 18:03, Jason Gunthorpe wrote:
> On Thu, Dec 28, 2017 at 09:51:24AM +0200, Mark Bloch wrote:
> 
>> Yes, let me explain.
>> This entire series aims to allow to turn on/off certain features and to
>> group the init/cleanup of those features together.
>> If you take that into account, the srcu (which is needed only for ODP)
>> needs to be done in the ODP stage, but the issue is that not only ODP code
>> uses the srcu, destroy_mkey() will sync on it.
> 
> Okay, right, so this series creates the united srcu possibility. The
> commit messages are not totally clear..
> 
>> So I can move the srcu initialization to INIT, but this way we lose
>> context when looking at the ODP stage.
> 
> If something is being used by multiple stages then it really seems
> like it should be common.
> 
> Introducing a function call back just to avoid initing a srcu in
> common code for the sole reason of 'keeping like together' seems
> like bad taste to me..

Well, it's not only that, moving the init srcu to the INIT stage
will also make me wrap it around with CONFIG_INFINIBAND_ON_DEMAND_PAGING
which I really don't like seeing in .c files, but we are already doing that
so .... :)

If you feel that strongly about it, let me know and I'll change that,
as well as the comment you had about the const in a different patch
and we'll send v1 with the fixes.

Thanks for the comments!

> Jason
> 

Mark
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH rdma-next 6/6] IB/mlx5: Move locks initialization to the corresponding stage
       [not found]                             ` <9f8a7042-7c6a-9929-fa10-0edf5c6e366e-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
@ 2017-12-28 17:14                               ` Jason Gunthorpe
       [not found]                                 ` <20171228171428.GA25436-uk2M96/98Pc@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Jason Gunthorpe @ 2017-12-28 17:14 UTC (permalink / raw)
  To: Mark Bloch
  Cc: Leon Romanovsky, Doug Ledford, linux-rdma-u79uwXL29TY76Z2rM5mHXA

On Thu, Dec 28, 2017 at 07:10:31PM +0200, Mark Bloch wrote:

> Well, it's not only that, moving the init srcu to the INIT stage
> will also make me wrap it around with CONFIG_INFINIBAND_ON_DEMAND_PAGING
> which I really don't like seeing in .c files, but we are already doing that
> so .... :)

Don't micro-optimize so much. Who cares if the few struct members are
allocated when !CONFIG_INFINIBAND_ON_DEMAND_PAGING ? Always allocate
the SRCU and always initialize it.

Maybe even get rid of CONFIG_INFINIBAND_ON_DEMAND_PAGING ?
Never understood why we had it in the first place.

> If you feel that strongly about it, let me know and I'll change that,
> as well as the comment you had about the const in a different patch
> and we'll send v1 with the fixes.

The function call back thing is really not nice, so I would be happy
to see it go away.

Jason
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH rdma-next 6/6] IB/mlx5: Move locks initialization to the corresponding stage
       [not found]                                 ` <20171228171428.GA25436-uk2M96/98Pc@public.gmane.org>
@ 2017-12-28 17:22                                   ` Mark Bloch
       [not found]                                     ` <70dcf1fa-fd00-7288-3a59-0d61e62e0043-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
  0 siblings, 1 reply; 17+ messages in thread
From: Mark Bloch @ 2017-12-28 17:22 UTC (permalink / raw)
  To: Jason Gunthorpe
  Cc: Leon Romanovsky, Doug Ledford, linux-rdma-u79uwXL29TY76Z2rM5mHXA



On 28/12/2017 19:14, Jason Gunthorpe wrote:
> On Thu, Dec 28, 2017 at 07:10:31PM +0200, Mark Bloch wrote:
> 
>> Well, it's not only that, moving the init srcu to the INIT stage
>> will also make me wrap it around with CONFIG_INFINIBAND_ON_DEMAND_PAGING
>> which I really don't like seeing in .c files, but we are already doing that
>> so .... :)
> 
> Don't micro-optimize so much. Who cares if the few struct members are
> allocated when !CONFIG_INFINIBAND_ON_DEMAND_PAGING ? Always allocate
> the SRCU and always initialize it.
> 
Well, I didn't write the ODP code, so I don't know why they did it this way.
Any place today that accesses that srcu is already under that CONFIG option, so
the init srcu can also stay under that CONFIG.

> Maybe even get rid of CONFIG_INFINIBAND_ON_DEMAND_PAGING ?
> Never understood why we had it in the first place.
> 

I'll talk with the guys that did/touched the ODP code last and see what they have
to say; if there isn't a valid reason I'll post a patch that cleans up this entire thing.

>> If you feel that strongly about it, let me know and I'll change that,
>> as well as the comment you had about the const in a different patch
>> and we'll send v1 with the fixes.
> 
> The function call back thing is really not nice, so I would be happy
> to see it go away.
> 
So I'll move the lock to INIT, but leave it under the ODP CONFIG option for now.
If there isn't (It's already on my TODO list to ask around) a good reason for this CONFIG
I'll send a patch that addresses that, sound good?

> Jason
> 

Mark
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH rdma-next 6/6] IB/mlx5: Move locks initialization to the corresponding stage
       [not found]                                     ` <70dcf1fa-fd00-7288-3a59-0d61e62e0043-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
@ 2017-12-28 17:45                                       ` Jason Gunthorpe
  0 siblings, 0 replies; 17+ messages in thread
From: Jason Gunthorpe @ 2017-12-28 17:45 UTC (permalink / raw)
  To: Mark Bloch
  Cc: Leon Romanovsky, Doug Ledford, linux-rdma-u79uwXL29TY76Z2rM5mHXA

On Thu, Dec 28, 2017 at 07:22:52PM +0200, Mark Bloch wrote:

> So I'll move the lock to INIT, but leave it under the ODP CONFIG
> option for now.  If there isn't (It's already on my TODO list to ask
> around) a good reason for this CONFIG I'll send a patch that
> addresses that, sound good?

Yep

Jason
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2017-12-28 17:45 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-12-24 12:40 [PATCH rdma-next 0/6] mlx5 profile infrastructure to add and remove stages Leon Romanovsky
     [not found] ` <20171224124015.31917-1-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
2017-12-24 12:40   ` [PATCH rdma-next 1/6] IB/mlx5: Create " Leon Romanovsky
     [not found]     ` <20171224124015.31917-2-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
2017-12-28  5:13       ` Jason Gunthorpe
2017-12-24 12:40   ` [PATCH rdma-next 2/6] IB/mlx5: Move RoCE/ETH initialization to the corresponding stage Leon Romanovsky
2017-12-24 12:40   ` [PATCH rdma-next 3/6] IB/mlx5: Move ODP " Leon Romanovsky
2017-12-24 12:40   ` [PATCH rdma-next 4/6] IB/mlx5: Move hardware counters " Leon Romanovsky
2017-12-24 12:40   ` [PATCH rdma-next 5/6] IB/mlx5: Move loopback " Leon Romanovsky
2017-12-24 12:40   ` [PATCH rdma-next 6/6] IB/mlx5: Move locks " Leon Romanovsky
     [not found]     ` <20171224124015.31917-7-leon-DgEjT+Ai2ygdnm+yROfE0A@public.gmane.org>
2017-12-28  5:18       ` Jason Gunthorpe
     [not found]         ` <20171228051806.GP25436-uk2M96/98Pc@public.gmane.org>
2017-12-28  5:32           ` Leon Romanovsky
     [not found]             ` <20171228053259.GP3494-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-12-28  5:35               ` Jason Gunthorpe
     [not found]                 ` <20171228053523.GR25436-uk2M96/98Pc@public.gmane.org>
2017-12-28  7:51                   ` Mark Bloch
     [not found]                     ` <3ad02325-5244-31c9-eb6d-139a17b064b6-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
2017-12-28 16:03                       ` Jason Gunthorpe
     [not found]                         ` <20171228160310.GW25436-uk2M96/98Pc@public.gmane.org>
2017-12-28 17:10                           ` Mark Bloch
     [not found]                             ` <9f8a7042-7c6a-9929-fa10-0edf5c6e366e-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
2017-12-28 17:14                               ` Jason Gunthorpe
     [not found]                                 ` <20171228171428.GA25436-uk2M96/98Pc@public.gmane.org>
2017-12-28 17:22                                   ` Mark Bloch
     [not found]                                     ` <70dcf1fa-fd00-7288-3a59-0d61e62e0043-VPRAkNaXOzVWk0Htik3J/w@public.gmane.org>
2017-12-28 17:45                                       ` Jason Gunthorpe

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.