* [RFC 1/2] nvme-rdma: Matching rdma WC to rdma queue according to WC QP
@ 2016-08-08 11:00 Roy Shterman
  2016-08-08 11:00 ` [RFC 2/2] nvmet-rdma: " Roy Shterman
  2016-08-08 11:32 ` [RFC 1/2] nvme-rdma: " Sagi Grimberg
  0 siblings, 2 replies; 5+ messages in thread
From: Roy Shterman @ 2016-08-08 11:00 UTC (permalink / raw)


Today, when matching an nvme_rdma_queue to an RDMA work completion,
we use cq_context, which is set to the queue pointer when the CQ is
created. If we later want to aggregate several QPs onto a single CQ,
this method will no longer work, so use the QP context instead.
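
For illustration only (not part of this patch), a setup that shares one
CQ between two queues while keeping a per-queue context could look
roughly like this; queue_a, queue_b and queue_size are hypothetical
placeholders:

	/* One CQ, sized to absorb completions from both QPs */
	shared_cq = ib_alloc_cq(ibdev, NULL, 2 * queue_size, 0,
				IB_POLL_SOFTIRQ);

	/* Each QP carries its own queue pointer in qp_context */
	init_attr.send_cq = shared_cq;
	init_attr.recv_cq = shared_cq;
	init_attr.qp_context = queue_a;
	ret = rdma_create_qp(queue_a->cm_id, dev->pd, &init_attr);

	init_attr.qp_context = queue_b;
	ret = rdma_create_qp(queue_b->cm_id, dev->pd, &init_attr);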

Signed-off-by: Roy Shterman <roysh at mellanox.com>
---
 drivers/nvme/host/rdma.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 278551b..98a0ab5 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -277,6 +277,7 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
 	init_attr.qp_type = IB_QPT_RC;
 	init_attr.send_cq = queue->ib_cq;
 	init_attr.recv_cq = queue->ib_cq;
+	init_attr.qp_context = queue;
 
 	ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
 
@@ -803,7 +804,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
 		const char *op)
 {
-	struct nvme_rdma_queue *queue = cq->cq_context;
+	struct nvme_rdma_queue *queue = wc->qp->qp_context;
 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
 	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -1163,7 +1164,7 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
 {
 	struct nvme_rdma_qe *qe =
 		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
-	struct nvme_rdma_queue *queue = cq->cq_context;
+	struct nvme_rdma_queue *queue = wc->qp->qp_context;
 	struct ib_device *ibdev = queue->device->dev;
 	struct nvme_completion *cqe = qe->data;
 	const size_t len = sizeof(struct nvme_completion);
-- 
1.8.3.1


* [RFC 2/2] nvmet-rdma: Matching rdma WC to rdma queue according to WC QP
  2016-08-08 11:00 [RFC 1/2] nvme-rdma: Matching rdma WC to rdma queue according to WC QP Roy Shterman
@ 2016-08-08 11:00 ` Roy Shterman
  2016-08-08 11:35   ` Sagi Grimberg
  2016-08-08 11:32 ` [RFC 1/2] nvme-rdma: " Sagi Grimberg
  1 sibling, 1 reply; 5+ messages in thread
From: Roy Shterman @ 2016-08-08 11:00 UTC (permalink / raw)


Today, when matching an nvmet_rdma_queue to an RDMA work completion,
we use cq_context, which is set to the queue pointer when the CQ is
created. If we later want to aggregate several QPs onto a single CQ,
this method will no longer work, so use the QP context instead.
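
For illustration only (not part of this patch), a receive handler that
serves several QPs on one CQ would recover the owning queue from the
work completion rather than from the CQ; the handler name here is
hypothetical:

	static void example_recv_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		/* Valid no matter which QP generated this completion */
		struct nvmet_rdma_queue *queue = wc->qp->qp_context;

		if (unlikely(wc->status != IB_WC_SUCCESS))
			nvmet_rdma_error_comp(queue);
	}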

Signed-off-by: Roy Shterman <roysh at mellanox.com>
---
 drivers/nvme/target/rdma.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index e06d504..164b1a2 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -547,7 +547,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvmet_rdma_rsp *rsp =
 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
-	struct nvmet_rdma_queue *queue = cq->cq_context;
+	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
 
 	WARN_ON(rsp->n_rdma <= 0);
 	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
@@ -726,7 +726,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvmet_rdma_cmd *cmd =
 		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
-	struct nvmet_rdma_queue *queue = cq->cq_context;
+	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
 	struct nvmet_rdma_rsp *rsp;
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
-- 
1.8.3.1


* [RFC 1/2] nvme-rdma: Matching rdma WC to rdma queue according to WC QP
  2016-08-08 11:00 [RFC 1/2] nvme-rdma: Matching rdma WC to rdma queue according to WC QP Roy Shterman
  2016-08-08 11:00 ` [RFC 2/2] nvmet-rdma: " Roy Shterman
@ 2016-08-08 11:32 ` Sagi Grimberg
  1 sibling, 0 replies; 5+ messages in thread
From: Sagi Grimberg @ 2016-08-08 11:32 UTC (permalink / raw)

On 08/08/16 14:00, Roy Shterman wrote:
> Today, when matching an nvme_rdma_queue to an RDMA work completion,
> we use cq_context, which is set to the queue pointer when the CQ is
> created. If we later want to aggregate several QPs onto a single CQ,
> this method will no longer work, so use the QP context instead.
>
> Signed-off-by: Roy Shterman <roysh at mellanox.com>
> ---
>  drivers/nvme/host/rdma.c | 5 +++--
>  1 file changed, 3 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
> index 278551b..98a0ab5 100644
> --- a/drivers/nvme/host/rdma.c
> +++ b/drivers/nvme/host/rdma.c
> @@ -277,6 +277,7 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
>  	init_attr.qp_type = IB_QPT_RC;
>  	init_attr.send_cq = queue->ib_cq;
>  	init_attr.recv_cq = queue->ib_cq;
> +	init_attr.qp_context = queue;
>
>  	ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
>
> @@ -803,7 +804,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
>  static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
>  		const char *op)
>  {
> -	struct nvme_rdma_queue *queue = cq->cq_context;
> +	struct nvme_rdma_queue *queue = wc->qp->qp_context;
>  	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
>
>  	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
> @@ -1163,7 +1164,7 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
>  {
>  	struct nvme_rdma_qe *qe =
>  		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
> -	struct nvme_rdma_queue *queue = cq->cq_context;
> +	struct nvme_rdma_queue *queue = wc->qp->qp_context;
>  	struct ib_device *ibdev = queue->device->dev;
>  	struct nvme_completion *cqe = qe->data;
>  	const size_t len = sizeof(struct nvme_completion);

On its own, the patch is useless. I don't see a point in getting it
in without the actual feature it's designed to enable.
Moreover, CQ sharing involves allocating large CQs (so we have room for
multiple QPs). This makes much better sense for the target driver
(which serves multiple hosts) but somewhat less so for the host driver.

For the host driver, we'll need a proper performance justification for
the sacrifice of pre-allocating large CQs (we'll need one for the
target as well, but it won't be hard to show in real-life scenarios).
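
To put rough numbers on the sizing concern: a CQ shared by nr_qps QPs
must be pre-allocated for the worst case of every attached QP, along
these lines (a sketch; nr_qps, send_depth, recv_depth and comp_vector
are placeholders):

	/* Worst case: every send and recv of every QP completes at once */
	cq_size = nr_qps * (send_depth + recv_depth);
	shared_cq = ib_alloc_cq(ibdev, NULL, cq_size, comp_vector,
				IB_POLL_SOFTIRQ);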


* [RFC 2/2] nvmet-rdma: Matching rdma WC to rdma queue according to WC QP
  2016-08-08 11:00 ` [RFC 2/2] nvmet-rdma: " Roy Shterman
@ 2016-08-08 11:35   ` Sagi Grimberg
  2016-08-08 13:24     ` Christoph Hellwig
  0 siblings, 1 reply; 5+ messages in thread
From: Sagi Grimberg @ 2016-08-08 11:35 UTC (permalink / raw)


On 08/08/16 14:00, Roy Shterman wrote:
> Today, when matching an nvmet_rdma_queue to an RDMA work completion,
> we use cq_context, which is set to the queue pointer when the CQ is
> created. If we later want to aggregate several QPs onto a single CQ,
> this method will no longer work, so use the QP context instead.
>
> Signed-off-by: Roy Shterman <roysh at mellanox.com>
> ---
>  drivers/nvme/target/rdma.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
> index e06d504..164b1a2 100644
> --- a/drivers/nvme/target/rdma.c
> +++ b/drivers/nvme/target/rdma.c
> @@ -547,7 +547,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
>  {
>  	struct nvmet_rdma_rsp *rsp =
>  		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
> -	struct nvmet_rdma_queue *queue = cq->cq_context;
> +	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
>
>  	WARN_ON(rsp->n_rdma <= 0);
>  	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
> @@ -726,7 +726,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
>  {
>  	struct nvmet_rdma_cmd *cmd =
>  		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
> -	struct nvmet_rdma_queue *queue = cq->cq_context;
> +	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
>  	struct nvmet_rdma_rsp *rsp;
>
>  	if (unlikely(wc->status != IB_WC_SUCCESS)) {
>

Christoph and I had some discussions about this in the past. We came
to the conclusion that an RDMA core API for per-cpu CQ pools might be
beneficial for other target mode drivers (isert, srpt) as well.

I even had an RFC in the works, but it kinda got left behind... It'd
be great if you could take it from there...
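
A sketch of what such a pool API could look like from a consumer's
point of view; the names mirror the ib_cq_pool_get()/ib_cq_pool_put()
interface that eventually grew out of this idea, and ibdev, nr_cqe and
comp_vector are placeholders:

	struct ib_cq *cq;

	/* Borrow a CQ with room for nr_cqe completions from the
	 * device-wide pool, hinting at a preferred completion vector. */
	cq = ib_cq_pool_get(ibdev, nr_cqe, comp_vector, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* ... create QPs against cq, run I/O ... */

	/* Give the CQ slots back to the pool when the queue goes away */
	ib_cq_pool_put(cq, nr_cqe);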


* [RFC 2/2] nvmet-rdma: Matching rdma WC to rdma queue according to WC QP
  2016-08-08 11:35   ` Sagi Grimberg
@ 2016-08-08 13:24     ` Christoph Hellwig
  0 siblings, 0 replies; 5+ messages in thread
From: Christoph Hellwig @ 2016-08-08 13:24 UTC (permalink / raw)


On Mon, Aug 08, 2016 at 02:35:19PM +0300, Sagi Grimberg wrote:
> Christoph and I had some discussions about this in the past. We came
> to the conclusion that an RDMA core API for per-cpu CQ pools might be
> beneficial for other target mode drivers (isert, srpt) as well.
> 
> I even had an RFC in the works, but it kinda got left behind... It'd
> be great if you could take it from there...

I've picked it up again and hope to be able to post something in a week
or two.

