linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* linux-next: manual merge of the block tree with the rdma tree
@ 2020-06-02  2:56 Stephen Rothwell
  2020-06-02  8:37 ` Max Gurtovoy
  0 siblings, 1 reply; 14+ messages in thread
From: Stephen Rothwell @ 2020-06-02  2:56 UTC (permalink / raw)
  To: Jens Axboe, Doug Ledford, Jason Gunthorpe
  Cc: Linux Next Mailing List, Linux Kernel Mailing List,
	Yamin Friedman, Israel Rukshin, Christoph Hellwig, Max Gurtovoy

[-- Attachment #1: Type: text/plain, Size: 1876 bytes --]

Hi all,

Today's linux-next merge of the block tree got a conflict in:

  drivers/nvme/target/rdma.c

between commit:

  5733111dcd97 ("nvmet-rdma: use new shared CQ mechanism")

from the rdma tree and commits:

  b0012dd39715 ("nvmet-rdma: use SRQ per completion vector")
  b09160c3996c ("nvmet-rdma: add metadata/T10-PI support")

from the block tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc drivers/nvme/target/rdma.c
index 2405db8bd855,d5141780592e..000000000000
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@@ -589,7 -751,8 +752,8 @@@ static void nvmet_rdma_read_data_done(s
  {
  	struct nvmet_rdma_rsp *rsp =
  		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
 -	struct nvmet_rdma_queue *queue = cq->cq_context;
 +	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ 	u16 status = 0;
  
  	WARN_ON(rsp->n_rdma <= 0);
  	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
@@@ -996,8 -1257,9 +1258,8 @@@ static int nvmet_rdma_create_queue_ib(s
  	 */
  	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
  
- 	queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1, comp_vector,
 -	queue->cq = ib_alloc_cq(ndev->device, queue,
 -			nr_cqe + 1, queue->comp_vector,
 -			IB_POLL_WORKQUEUE);
++	queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1, queue->comp_vector,
 +				   IB_POLL_WORKQUEUE);
  	if (IS_ERR(queue->cq)) {
  		ret = PTR_ERR(queue->cq);
  		pr_err("failed to create CQ cqe= %d ret= %d\n",

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 14+ messages in thread
* linux-next: manual merge of the block tree with the rdma tree
@ 2020-06-02  2:48 Stephen Rothwell
  0 siblings, 0 replies; 14+ messages in thread
From: Stephen Rothwell @ 2020-06-02  2:48 UTC (permalink / raw)
  To: Jens Axboe, Doug Ledford, Jason Gunthorpe
  Cc: Linux Next Mailing List, Linux Kernel Mailing List,
	Yamin Friedman, Max Gurtovoy, Christoph Hellwig

[-- Attachment #1: Type: text/plain, Size: 3079 bytes --]

Hi all,

Today's linux-next merge of the block tree got a conflict in:

  drivers/nvme/host/rdma.c

between commit:

  583f69304b91 ("nvme-rdma: use new shared CQ mechanism")

from the rdma tree and commit:

  5ec5d3bddc6b ("nvme-rdma: add metadata/T10-PI support")

from the block tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc drivers/nvme/host/rdma.c
index 83d5f292c937,f8f856dc0c67..000000000000
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@@ -85,7 -95,7 +95,8 @@@ struct nvme_rdma_queue 
  	struct rdma_cm_id	*cm_id;
  	int			cm_error;
  	struct completion	cm_done;
 +	int			cq_size;
+ 	bool			pi_support;
  };
  
  struct nvme_rdma_ctrl {
@@@ -262,7 -272,8 +273,9 @@@ static int nvme_rdma_create_qp(struct n
  	init_attr.qp_type = IB_QPT_RC;
  	init_attr.send_cq = queue->ib_cq;
  	init_attr.recv_cq = queue->ib_cq;
 +	init_attr.qp_context = queue;
+ 	if (queue->pi_support)
+ 		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
  
  	ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
  
@@@ -426,43 -437,18 +447,49 @@@ static void nvme_rdma_destroy_queue_ib(
  	nvme_rdma_dev_put(dev);
  }
  
- static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+ static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
  {
- 	return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
- 		     ibdev->attrs.max_fast_reg_page_list_len - 1);
+ 	u32 max_page_list_len;
+ 
+ 	if (pi_support)
+ 		max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
+ 	else
+ 		max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
+ 
+ 	return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
  }
  
 +static int nvme_rdma_create_cq(struct ib_device *ibdev,
 +				struct nvme_rdma_queue *queue)
 +{
 +	int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
 +	enum ib_poll_context poll_ctx;
 +
 +	/*
 +	 * Spread I/O queues completion vectors according their queue index.
 +	 * Admin queues can always go on completion vector 0.
 +	 */
 +	comp_vector = idx == 0 ? idx : idx - 1;
 +
 +	/* Polling queues need direct cq polling context */
 +	if (nvme_rdma_poll_queue(queue)) {
 +		poll_ctx = IB_POLL_DIRECT;
 +		queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
 +					   comp_vector, poll_ctx);
 +	} else {
 +		poll_ctx = IB_POLL_SOFTIRQ;
 +		queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
 +					      comp_vector, poll_ctx);
 +	}
 +
 +	if (IS_ERR(queue->ib_cq)) {
 +		ret = PTR_ERR(queue->ib_cq);
 +		return ret;
 +	}
 +
 +	return 0;
 +}
 +
  static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
  {
  	struct ib_device *ibdev;

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 14+ messages in thread
* linux-next: manual merge of the block tree with the rdma tree
@ 2018-07-26  3:58 Stephen Rothwell
  2018-08-15  1:45 ` Stephen Rothwell
  0 siblings, 1 reply; 14+ messages in thread
From: Stephen Rothwell @ 2018-07-26  3:58 UTC (permalink / raw)
  To: Jens Axboe, Doug Ledford, Jason Gunthorpe
  Cc: Linux-Next Mailing List, Linux Kernel Mailing List,
	Bart Van Assche, Max Gurtovoy, Christoph Hellwig

[-- Attachment #1: Type: text/plain, Size: 2289 bytes --]

Hi all,

Today's linux-next merge of the block tree got a conflict in:

  drivers/nvme/target/rdma.c

between commit:

  23f96d1f15a7 ("nvmet-rdma: Simplify ib_post_(send|recv|srq_recv)() calls")
  202093848cac ("nvmet-rdma: add an error flow for post_recv failures")

from the rdma tree and commits:

  2fc464e2162c ("nvmet-rdma: add unlikely check in the fast path")

from the block tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc drivers/nvme/target/rdma.c
index 1a642e214a4c,e7f43d1e1779..000000000000
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@@ -382,13 -435,22 +435,21 @@@ static void nvmet_rdma_free_rsps(struc
  static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
  		struct nvmet_rdma_cmd *cmd)
  {
 -	struct ib_recv_wr *bad_wr;
+ 	int ret;
+ 
  	ib_dma_sync_single_for_device(ndev->device,
  		cmd->sge[0].addr, cmd->sge[0].length,
  		DMA_FROM_DEVICE);
  
  	if (ndev->srq)
- 		return ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
- 	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
 -		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
++		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
+ 	else
 -		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
++		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
+ 
+ 	if (unlikely(ret))
+ 		pr_err("post_recv cmd failed\n");
+ 
+ 	return ret;
  }
  
  static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
@@@ -491,7 -553,7 +552,7 @@@ static void nvmet_rdma_queue_response(s
  		rsp->send_sge.addr, rsp->send_sge.length,
  		DMA_TO_DEVICE);
  
- 	if (ib_post_send(cm_id->qp, first_wr, NULL)) {
 -	if (unlikely(ib_post_send(cm_id->qp, first_wr, &bad_wr))) {
++	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
  		pr_err("sending cmd response failed\n");
  		nvmet_rdma_release_rsp(rsp);
  	}

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2020-06-03 10:56 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-02  2:56 linux-next: manual merge of the block tree with the rdma tree Stephen Rothwell
2020-06-02  8:37 ` Max Gurtovoy
2020-06-02 10:43   ` Stephen Rothwell
2020-06-02 19:01   ` Jason Gunthorpe
2020-06-02 19:02     ` Jens Axboe
2020-06-02 19:09       ` Jason Gunthorpe
2020-06-02 21:37         ` Jens Axboe
2020-06-02 22:40           ` Max Gurtovoy
2020-06-02 23:32             ` Jason Gunthorpe
2020-06-03 10:56               ` Max Gurtovoy
  -- strict thread matches above, loose matches on Subject: below --
2020-06-02  2:48 Stephen Rothwell
2018-07-26  3:58 Stephen Rothwell
2018-08-15  1:45 ` Stephen Rothwell
2018-08-15 19:26   ` Jason Gunthorpe

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).