From: Stephen Rothwell <sfr@canb.auug.org.au>
To: David Miller <davem@davemloft.net>,
	Networking <netdev@vger.kernel.org>,
	Doug Ledford <dledford@redhat.com>,
	Jason Gunthorpe <jgg@mellanox.com>
Cc: Linux-Next Mailing List <linux-next@vger.kernel.org>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
	Mark Bloch <markb@mellanox.com>,
	Leon Romanovsky <leonro@mellanox.com>
Subject: linux-next: manual merge of the net-next tree with the rdma-fixes tree
Date: Fri, 16 Mar 2018 11:56:10 +1100
Message-ID: <20180316115610.3d7f232a@canb.auug.org.au>

Hi all,

Today's linux-next merge of the net-next tree got a conflict in:

  drivers/infiniband/hw/mlx5/main.c

between commit:

  42cea83f9524 ("IB/mlx5: Fix cleanup order on unload")

from the rdma-fixes tree and commit:

  b5ca15ad7e61 ("IB/mlx5: Add proper representors support")

from the net-next tree.

I fixed it up (see below and the merge fix patch as well) and can
carry the fix as necessary. This is now fixed as far as linux-next is
concerned, but any non-trivial conflicts should be mentioned to your
upstream maintainer when your tree is submitted for merging.  You may
also want to consider cooperating with the maintainer of the conflicting
tree to minimise any particularly complex conflicts.
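
For context on why the resolution can drop the cleanup callback: the
mlx5_ib profiles are tables of per-stage init/cleanup pairs that the
driver walks in order on load and in reverse on unload, skipping NULL
entries.  The sketch below is illustrative only (the helper name and
loop are mine, not copied from main.c), but it shows why pairing
mlx5_ib_stage_post_ib_reg_umr_init with a NULL cleanup slot is valid.

/*
 * Simplified sketch of a stage-table profile.  Either callback in a
 * stage may be NULL; the walker simply skips it.
 */
struct mlx5_ib_dev;

struct mlx5_ib_stage {
	int  (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

/* Walk the stages in order; on failure, unwind the ones already run. */
static int profile_init(struct mlx5_ib_dev *dev,
			const struct mlx5_ib_stage *stage, int nstages)
{
	int i, err;

	for (i = 0; i < nstages; i++) {
		if (stage[i].init) {
			err = stage[i].init(dev);
			if (err)
				goto unwind;
		}
	}
	return 0;

unwind:
	while (i--)
		if (stage[i].cleanup)
			stage[i].cleanup(dev);
	return err;
}

With that in mind, the fix-up below just swaps the old UMR_RESOURCES
stage for the renamed POST_IB_REG_UMR stage from the rdma-fixes commit
and leaves its cleanup slot empty.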

From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Fri, 16 Mar 2018 11:54:01 +1100
Subject: [PATCH] IB/mlx5: merge fix for "Fix cleanup order on unload"

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
---
 drivers/infiniband/hw/mlx5/ib_rep.c  | 6 +++---
 drivers/infiniband/hw/mlx5/mlx5_ib.h | 3 +--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 61cc3d7db257..7fb997dadd80 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -33,9 +33,9 @@ static const struct mlx5_ib_profile rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
 		     mlx5_ib_stage_class_attr_init,
 		     NULL),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 7ec753ec7962..c45a7abdbe3e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1071,8 +1071,7 @@ int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
 void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
 int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
 void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
 int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 		      const struct mlx5_ib_profile *profile,
-- 
2.16.1

-- 
Cheers,
Stephen Rothwell

diff --cc drivers/infiniband/hw/mlx5/main.c
index da091de4e69d,d9474b95d8e5..000000000000
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@@ -4860,19 -4999,19 +4996,19 @@@ int mlx5_ib_stage_ib_reg_init(struct ml
  	return ib_register_device(&dev->ib_dev, NULL);
  }
  
 -void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
 +static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
  {
 -	ib_unregister_device(&dev->ib_dev);
 +	destroy_umrc_res(dev);
  }
  
- static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
 -int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
++void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
  {
 -	return create_umr_res(dev);
 +	ib_unregister_device(&dev->ib_dev);
  }
  
- static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 -void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
++int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
  {
 -	destroy_umrc_res(dev);
 +	return create_umr_res(dev);
  }
  
  static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
@@@ -4999,6 -5144,48 +5144,48 @@@ static const struct mlx5_ib_profile pf_
  		     NULL),
  };
  
+ static const struct mlx5_ib_profile nic_rep_profile = {
+ 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ 		     mlx5_ib_stage_init_init,
+ 		     mlx5_ib_stage_init_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
+ 		     mlx5_ib_stage_flow_db_init,
+ 		     mlx5_ib_stage_flow_db_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ 		     mlx5_ib_stage_caps_init,
+ 		     NULL),
+ 	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+ 		     mlx5_ib_stage_rep_non_default_cb,
+ 		     NULL),
+ 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ 		     mlx5_ib_stage_rep_roce_init,
+ 		     mlx5_ib_stage_rep_roce_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ 		     mlx5_ib_stage_dev_res_init,
+ 		     mlx5_ib_stage_dev_res_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ 		     mlx5_ib_stage_counters_init,
+ 		     mlx5_ib_stage_counters_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_UAR,
+ 		     mlx5_ib_stage_uar_init,
+ 		     mlx5_ib_stage_uar_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ 		     mlx5_ib_stage_bfrag_init,
+ 		     mlx5_ib_stage_bfrag_cleanup),
+ 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ 		     mlx5_ib_stage_ib_reg_init,
+ 		     mlx5_ib_stage_ib_reg_cleanup),
 -	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
 -		     mlx5_ib_stage_umr_res_init,
 -		     mlx5_ib_stage_umr_res_cleanup),
++	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
++		     mlx5_ib_stage_post_ib_reg_umr_init,
++		     NULL),
+ 	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
+ 		     mlx5_ib_stage_class_attr_init,
+ 		     NULL),
+ 	STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
+ 		     mlx5_ib_stage_rep_reg_init,
+ 		     mlx5_ib_stage_rep_reg_cleanup),
+ };
+ 
  static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
  {
  	struct mlx5_ib_multiport_info *mpi;

Thread overview: 9+ messages
2018-03-16  0:56 Stephen Rothwell [this message]
2018-03-16  1:18 ` linux-next: manual merge of the net-next tree with the rdma-fixes tree Doug Ledford
2018-03-16  2:05   ` Jason Gunthorpe
2018-03-16  6:54   ` Saeed Mahameed
2018-03-23  4:19   ` David Miller
2018-03-23  4:33     ` Jason Gunthorpe
2018-03-23 14:22       ` David Miller
2023-06-13  1:43 Stephen Rothwell
2023-06-13 12:39 ` Leon Romanovsky
