* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: Parav Pandit @ 2017-02-07 22:37 UTC


Fabric operations are constants of a registered transport. They don't
change with every target request that gets processed by the nvmet-core.
Therefore this patch moves the fabrics_ops initialization out of the
hot request processing path for rdma and fc. For the loop target it
remains in the request path, via a new helper API.

Additionally, this patch avoids initializing the nvme cq and sq
pointers on every request for rdma, since the nvme queue linking
occurs once, at queue allocation time, for both the AQ and the IOQs.

As a result, in the nvmet_req_init() function this change saves
writing 24 bytes of constants (three 8-byte pointers) on every
datapath request and also reduces the function size by 32 bytes for
the rdma transport.

Reviewed-by: Max Gurtovoy <maxg at mellanox.com>
Signed-off-by: Parav Pandit <parav at mellanox.com>
---
 drivers/nvme/target/core.c  |  8 ++------
 drivers/nvme/target/fc.c    | 11 +++++------
 drivers/nvme/target/loop.c  | 15 +++++++++++----
 drivers/nvme/target/nvmet.h | 16 ++++++++++++++--
 drivers/nvme/target/rdma.c  |  7 +++++--
 5 files changed, 37 insertions(+), 20 deletions(-)

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b1d66ed..e55d650 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -463,15 +463,11 @@ int nvmet_sq_init(struct nvmet_sq *sq)
 }
 EXPORT_SYMBOL_GPL(nvmet_sq_init);
 
-bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
-		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
+bool nvmet_req_init(struct nvmet_req *req)
 {
 	u8 flags = req->cmd->common.flags;
 	u16 status;
 
-	req->cq = cq;
-	req->sq = sq;
-	req->ops = ops;
 	req->sg = NULL;
 	req->sg_cnt = 0;
 	req->rsp->status = 0;
@@ -504,7 +500,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	if (status)
 		goto fail;
 
-	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
+	if (unlikely(!percpu_ref_tryget_live(&req->sq->ref))) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto fail;
 	}
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 173e842..8b201c0 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -30,7 +30,6 @@
 
 /* *************************** Data Structures/Defines ****************** */
 
-
 #define NVMET_LS_CTX_COUNT		4
 
 /* for this implementation, assume small single frame rqst/rsp */
@@ -210,6 +209,7 @@ struct nvmet_fc_tgt_assoc {
 static LIST_HEAD(nvmet_fc_target_list);
 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
 
+static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
 
 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
@@ -417,6 +417,7 @@ struct nvmet_fc_tgt_assoc {
 		fod->tgtport = tgtport;
 		fod->queue = queue;
 		fod->active = false;
+		nvmet_req_fabric_ops_init(&fod->req, &nvmet_fc_tgt_fcp_ops);
 		list_add_tail(&fod->fcp_list, &queue->fod_list);
 		spin_lock_init(&fod->flock);
 
@@ -1403,8 +1404,6 @@ enum {
 
 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
 
-static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
-
 static void
 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
 {
@@ -2008,10 +2007,10 @@ enum {
 	/* clear any response payload */
 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
 
-	ret = nvmet_req_init(&fod->req,
+	nvmet_req_link_to_nvme_queues(&fod->req,
 				&fod->queue->nvme_cq,
-				&fod->queue->nvme_sq,
-				&nvmet_fc_tgt_fcp_ops);
+				&fod->queue->nvme_sq);
+	ret = nvmet_req_init(&fod->req);
 	if (!ret) {	/* bad SQE content */
 		nvmet_fc_abort_op(tgtport, fod->fcpreq);
 		return;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9aaa700..2f7ca34 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -174,8 +174,12 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
 	iod->req.port = nvmet_loop_port;
-	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
-			&queue->nvme_sq, &nvme_loop_ops)) {
+
+	nvmet_req_link_to_nvme_queues(&iod->req,
+				&queue->nvme_cq, &queue->nvme_sq);
+	nvmet_req_fabric_ops_init(&iod->req, &nvme_loop_ops);
+
+	if (!nvmet_req_init(&iod->req)) {
 		nvme_cleanup_cmd(req);
 		blk_mq_start_request(req);
 		nvme_loop_queue_response(&iod->req);
@@ -211,8 +215,11 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
 
-	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
-			&nvme_loop_ops)) {
+	nvmet_req_link_to_nvme_queues(&iod->req,
+				&queue->nvme_cq, &queue->nvme_sq);
+	nvmet_req_fabric_ops_init(&iod->req, &nvme_loop_ops);
+
+	if (!nvmet_req_init(&iod->req)) {
 		dev_err(ctrl->ctrl.device, "failed async event work\n");
 		return;
 	}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 23d5eb1..48486c2 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -259,8 +259,20 @@ struct nvmet_async_event {
 int nvmet_parse_discovery_cmd(struct nvmet_req *req);
 int nvmet_parse_fabrics_cmd(struct nvmet_req *req);
 
-bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
-		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
+static inline void nvmet_req_fabric_ops_init(struct nvmet_req *req,
+		struct nvmet_fabrics_ops *ops)
+{
+	req->ops = ops;
+}
+
+static inline void nvmet_req_link_to_nvme_queues(struct nvmet_req *req,
+	struct nvmet_cq *cq, struct nvmet_sq *sq)
+{
+	req->cq = cq;
+	req->sq = sq;
+}
+
+bool nvmet_req_init(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 
 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 8c3760a..1a57ab3 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -402,6 +402,10 @@ static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
 		if (ret)
 			goto out_free;
 
+		nvmet_req_link_to_nvme_queues(&rsp->req,
+					&queue->nvme_cq, &queue->nvme_sq);
+		nvmet_req_fabric_ops_init(&rsp->req, &nvmet_rdma_ops);
+
 		list_add_tail(&rsp->free_list, &queue->free_rsps);
 	}
 
@@ -698,8 +702,7 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 	cmd->n_rdma = 0;
 	cmd->req.port = queue->port;
 
-	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
-			&queue->nvme_sq, &nvmet_rdma_ops))
+	if (!nvmet_req_init(&cmd->req))
 		return;
 
 	status = nvmet_rdma_map_sgl(cmd);
-- 
1.8.3.1


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: Sagi Grimberg @ 2017-02-08 10:13 UTC

On 08/02/17 00:37, Parav Pandit wrote:
> Fabric operations are constants of a registered transport. They don't
> change with every target request that gets processed by the nvmet-core.
> Therefore this patch moves the fabrics_ops initialization out of the
> hot request processing path for rdma and fc. For the loop target it
> remains in the request path, via a new helper API.

Can't you add it to nvme_loop_init_iod()?
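
Something like this (untested sketch, extending the existing
nvme_loop_init_iod(); field names as in current loop.c) is what I had
in mind:

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.rsp = &iod->rsp;
	iod->queue = &ctrl->queues[queue_idx];

	/* transport constants: set once when the request is allocated,
	 * not on every nvme_loop_queue_rq() call
	 */
	iod->req.ops = &nvme_loop_ops;
	iod->req.cq = &iod->queue->nvme_cq;
	iod->req.sq = &iod->queue->nvme_sq;

	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}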

> Additionally, this patch avoids initializing the nvme cq and sq
> pointers on every request for rdma, since the nvme queue linking
> occurs once, at queue allocation time, for both the AQ and the IOQs.

This breaks SRQ mode, where every nvmet_rdma_cmd serves different
queues over its lifetime...


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: Parav Pandit @ 2017-02-08 18:06 UTC


Hi Sagi,

> -----Original Message-----
> From: Sagi Grimberg [mailto:sagi at grimberg.me]
> Sent: Wednesday, February 8, 2017 4:13 AM
> To: Parav Pandit <parav at mellanox.com>; hch at lst.de;
> james.smart at broadcom.com; linux-nvme at lists.infradead.org
> Subject: Re: [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on
> every request.
> 
> 
> 
> On 08/02/17 00:37, Parav Pandit wrote:
> > Fabric operations are constants of a registered transport. They don't
> > change with every target request that gets processed by the nvmet-core.
> > Therefore this patch moves the fabrics_ops initialization out of the
> > hot request processing path for rdma and fc. For the loop target it
> > remains in the request path, via a new helper API.
> 
> Can't you add it to nvme_loop_init_iod()?
I didn't review that option when I first did it. Looking at it now, I
believe it can be moved there.
I was under the assumption that init_request() runs for every request;
if that's the case, we are not currently touching nvmet_loop_queue
there, so that minor overhead can be avoided by keeping this in the
current location.

> 
> > Additionally, this patch avoids initializing the nvme cq and sq
> > pointers on every request for rdma, since the nvme queue linking
> > occurs once, at queue allocation time, for both the AQ and the IOQs.
> 
> This breaks SRQ mode, where every nvmet_rdma_cmd serves different
> queues over its lifetime...

I fail to understand that.
nvmet_rdma_create_queue_ib() is called for as many QPs as we create,
not based on the number of SRQs we create.
nvmet_rdma_queue stores the cq and sq.
So there are as many cqs and sqs on the fabric side as there are QPs
on which the fabrics connect command is called.
The queue is pulled out of the cq context on which we received the
command.
The SRQ is just a shared place among these nvme queues to share the RQ
buffers, right?
What did I miss?


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: Sagi Grimberg @ 2017-02-08 18:18 UTC

>>> Additionally, this patch avoids initializing the nvme cq and sq
>>> pointers on every request for rdma, since the nvme queue linking
>>> occurs once, at queue allocation time, for both the AQ and the IOQs.
>>
>> This breaks SRQ mode, where every nvmet_rdma_cmd serves different
>> queues over its lifetime...
>
> I fail to understand that.
> nvmet_rdma_create_queue_ib() is called for as many QPs as we create,
> not based on the number of SRQs we create.

Correct.

> nvmet_rdma_queue stores the cq and sq.

Correct.

> So there are as many cqs and sqs on the fabric side as there are QPs
> on which the fabrics connect command is called.
> The queue is pulled out of the cq context on which we received the
> command.
> The SRQ is just a shared place among these nvme queues to share the
> RQ buffers, right?

Correct too, but we then assign the queue to the command, which is the
context of the received SQE (maybe with in-capsule data). For the SRQ
case we allocate the commands and pre-post them (before we have any
queues), so they are absolutely not bound to a given queue; they can't
be, actually.

So for each new recv completion, the command context is bound to the
queue that it completed on, which means it can be bound to different
queues over its lifetime.
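
Roughly, in nvmet_rdma_recv_done() (simplified and untested, error
handling and in-capsule details elided):

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	/* the cmd is (re)bound to whichever queue's CQ the recv
	 * completed on -- with an SRQ that can be a different queue
	 * each time
	 */
	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);	/* the rsp pool is per-queue */
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->req.cmd = cmd->nvme_cmd;

	nvmet_rdma_handle_command(queue, rsp);
}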


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: Parav Pandit @ 2017-02-08 20:02 UTC


Hi Sagi,

> -----Original Message-----
> From: Sagi Grimberg [mailto:sagi at grimberg.me]
> Sent: Wednesday, February 8, 2017 12:18 PM
> To: Parav Pandit <parav at mellanox.com>; hch at lst.de;
> james.smart at broadcom.com; linux-nvme at lists.infradead.org
> Subject: Re: [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on
> every request.
> 
> 
> >>> Additionally, this patch avoids initializing the nvme cq and sq
> >>> pointers on every request for rdma, since the nvme queue linking
> >>> occurs once, at queue allocation time, for both the AQ and the IOQs.
> >>
> >> This breaks SRQ mode, where every nvmet_rdma_cmd serves different
> >> queues over its lifetime...
> >
> > I fail to understand that.
> > nvmet_rdma_create_queue_ib() is called for as many QPs as we create,
> > not based on the number of SRQs we create.
> 
> Correct.
> 
> > nvmet_rdma_queue stores the cq and sq.
> 
> Correct.
> 
> > So there are as many cqs and sqs on the fabric side as there are QPs
> > on which the fabrics connect command is called.
> > The queue is pulled out of the cq context on which we received the
> > command.
> > The SRQ is just a shared place among these nvme queues to share the
> > RQ buffers, right?
> 
> Correct too, but we then assign the queue to the command, which is the
> context of the received SQE (maybe with in-capsule data). For the SRQ
> case we allocate the commands and pre-post them (before we have any
> queues), so they are absolutely not bound to a given queue; they can't
> be, actually.
> 
> So for each new recv completion, the command context is bound to the
> queue that it completed on, which means it can be bound to different
> queues over its lifetime.

Sorry, I am still not getting it.
The nvmet_rdma_rsp structure contains the nvmet_req.
nvmet_rdma_rsp (and so nvmet_req) are per-QP allocations.
The cq and sq pointers are initialized inside the nvmet_req.

nvmet_rdma_cmd is a per-RQ/SRQ allocation.
When we do recv_done(), we bind the rsp structure to the cmd (the cmd
can be from an RQ or an SRQ).
So I believe this is still good.
If we had the cq and sq pointers inside the nvmet_rdma_cmd, then I
could understand that it would break.

On a side note: I tested the patch with the use_srq flag (but didn't
publish its performance numbers, as they are awaiting your per-core
SRQ fixes to match the regular RQ numbers :-) ).
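
To illustrate the lifetimes (a simplified sketch of the existing
rdma.c structures, with most fields elided):

/* per-QP: allocated in nvmet_rdma_alloc_rsps(), so always tied to
 * one queue; this is where req->cq/req->sq/req->ops live
 */
struct nvmet_rdma_rsp {
	struct nvmet_rdma_cmd	*cmd;	/* bound in recv_done() */
	struct nvmet_rdma_queue	*queue;
	struct nvmet_req	req;
	/* ... */
};

/* per-RQ/SRQ: with an SRQ, a given cmd can complete on any queue
 * sharing that SRQ
 */
struct nvmet_rdma_cmd {
	struct nvme_command	*nvme_cmd;
	struct nvmet_rdma_queue	*queue;
	/* ... */
};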


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: Parav Pandit @ 2017-02-13 16:11 UTC


Hi Sagi,


> -----Original Message-----
> From: Parav Pandit
> Sent: Wednesday, February 8, 2017 2:03 PM
> To: 'Sagi Grimberg' <sagi at grimberg.me>; hch at lst.de;
> james.smart at broadcom.com; linux-nvme at lists.infradead.org
> Subject: RE: [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on
> every request.
> 
> > >>> Additionally, this patch avoids initializing the nvme cq and sq
> > >>> pointers on every request for rdma, since the nvme queue linking
> > >>> occurs once, at queue allocation time, for both the AQ and the IOQs.
> > >>
> > >> This breaks SRQ mode, where every nvmet_rdma_cmd serves different
> > >> queues over its lifetime...
> > >
> > > I fail to understand that.
> > > nvmet_rdma_create_queue_ib() is called for as many QPs as we create,
> > > not based on the number of SRQs we create.
> >
> > Correct.
> >
> > > nvmet_rdma_queue stores the cq and sq.
> >
> > Correct.
> >
> > > So there are as many cqs and sqs on the fabric side as there are
> > > QPs on which the fabrics connect command is called.
> > > The queue is pulled out of the cq context on which we received the
> > > command.
> > > The SRQ is just a shared place among these nvme queues to share the
> > > RQ buffers, right?
> >
> > Correct too, but we then assign the queue to the command, which is the
> > context of the received SQE (maybe with in-capsule data). For the SRQ
> > case we allocate the commands and pre-post them (before we have any
> > queues), so they are absolutely not bound to a given queue; they can't
> > be, actually.
> >
> > So for each new recv completion, the command context is bound to the
> > queue that it completed on, which means it can be bound to different
> > queues over its lifetime.
> 
> Sorry, I am still not getting it.
> The nvmet_rdma_rsp structure contains the nvmet_req.
> nvmet_rdma_rsp (and so nvmet_req) are per-QP allocations.
> The cq and sq pointers are initialized inside the nvmet_req.
> 
> nvmet_rdma_cmd is a per-RQ/SRQ allocation.
> When we do recv_done(), we bind the rsp structure to the cmd (the cmd
> can be from an RQ or an SRQ).
> So I believe this is still good.
> If we had the cq and sq pointers inside the nvmet_rdma_cmd, then I
> could understand that it would break.
> 
> On a side note: I tested the patch with the use_srq flag (but didn't
> publish its performance numbers, as they are awaiting your per-core
> SRQ fixes to match the regular RQ numbers :-) ).
> 
> 
Did you get a chance to review my comments?
Can you please ack that this doesn't break the SRQ?


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: Parav Pandit @ 2017-02-13 16:12 UTC


Hi James,

Does this change look fine for FC?
I do not have the infrastructure to test the FC side.

Parav

> -----Original Message-----
> From: Parav Pandit
> Sent: Wednesday, February 8, 2017 2:03 PM
> To: 'Sagi Grimberg' <sagi at grimberg.me>; hch at lst.de;
> james.smart at broadcom.com; linux-nvme at lists.infradead.org
> Subject: RE: [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on
> every request.
> 
> Hi Sagi,
> 
> > -----Original Message-----
> > From: Sagi Grimberg [mailto:sagi at grimberg.me]
> > Sent: Wednesday, February 8, 2017 12:18 PM
> > To: Parav Pandit <parav at mellanox.com>; hch at lst.de;
> > james.smart at broadcom.com; linux-nvme at lists.infradead.org
> > Subject: Re: [PATCH] nvmet: Avoid writing fabric_ops, queue pointers
> > on every request.
> >
> >
> > >>> Additionally, this patch avoids initializing the nvme cq and sq
> > >>> pointers on every request for rdma, since the nvme queue linking
> > >>> occurs once, at queue allocation time, for both the AQ and the IOQs.
> > >>
> > >> This breaks SRQ mode, where every nvmet_rdma_cmd serves different
> > >> queues over its lifetime...
> > >
> > > I fail to understand that.
> > > nvmet_rdma_create_queue_ib() is called for as many QPs as we create,
> > > not based on the number of SRQs we create.
> >
> > Correct.
> >
> > > nvmet_rdma_queue stores the cq and sq.
> >
> > Correct.
> >
> > > So there are as many cqs and sqs on the fabric side as there are
> > > QPs on which the fabrics connect command is called.
> > > The queue is pulled out of the cq context on which we received the
> > > command.
> > > The SRQ is just a shared place among these nvme queues to share the
> > > RQ buffers, right?
> >
> > Correct too, but we then assign the queue to the command, which is the
> > context of the received SQE (maybe with in-capsule data). For the SRQ
> > case we allocate the commands and pre-post them (before we have any
> > queues), so they are absolutely not bound to a given queue; they can't
> > be, actually.
> >
> > So for each new recv completion, the command context is bound to the
> > queue that it completed on, which means it can be bound to different
> > queues over its lifetime.
> 
> Sorry, I am still not getting it.
> The nvmet_rdma_rsp structure contains the nvmet_req.
> nvmet_rdma_rsp (and so nvmet_req) are per-QP allocations.
> The cq and sq pointers are initialized inside the nvmet_req.
> 
> nvmet_rdma_cmd is a per-RQ/SRQ allocation.
> When we do recv_done(), we bind the rsp structure to the cmd (the cmd
> can be from an RQ or an SRQ).
> So I believe this is still good.
> If we had the cq and sq pointers inside the nvmet_rdma_cmd, then I
> could understand that it would break.
> 
> On a side note: I tested the patch with the use_srq flag (but didn't
> publish its performance numbers, as they are awaiting your per-core
> SRQ fixes to match the regular RQ numbers :-) ).
> 
> 


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: Sagi Grimberg @ 2017-02-15  8:58 UTC

> Hi Sagi,

Hi Parav,

>> Sorry, I am still not getting it.
>> The nvmet_rdma_rsp structure contains the nvmet_req.
>> nvmet_rdma_rsp (and so nvmet_req) are per-QP allocations.
>> The cq and sq pointers are initialized inside the nvmet_req.
>>
>> nvmet_rdma_cmd is a per-RQ/SRQ allocation.
>> When we do recv_done(), we bind the rsp structure to the cmd (the cmd
>> can be from an RQ or an SRQ).
>> So I believe this is still good.
>> If we had the cq and sq pointers inside the nvmet_rdma_cmd, then I
>> could understand that it would break.
>>
>> On a side note: I tested the patch with the use_srq flag (but didn't
>> publish its performance numbers, as they are awaiting your per-core
>> SRQ fixes to match the regular RQ numbers :-) ).

Yep, forgot that the sq/cq are referenced from the rsp which is
per-queue.

Christoph, James, can I get an ack from you guys?


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: hch @ 2017-02-15 16:15 UTC

On Wed, Feb 15, 2017 at 10:58:32AM +0200, Sagi Grimberg wrote:
> Yep, forgot that the sq/cq are referenced from the rsp which is
> per-queue.
>
> Christoph, James, can I get an ack from you guys?

I really hate the nvmet_req_fabric_ops_init and
nvmet_req_link_to_nvme_queues helpers, just use direct assignments
instead of trivial helpers with crazy long names.

I need to look at it a bit more for a real review.


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: Sagi Grimberg @ 2017-02-15 16:28 UTC

>> Yep, forgot that the sq/cq are referenced from the rsp which is
>> per-queue.
>>
>> Christoph, James, can I get an ack from you guys?
>
> I really hate the nvmet_req_fabric_ops_init and
> nvmet_req_link_to_nvme_queues helpers, just use direct assignments
> instead of trivial helpers with crazy long names.

Heh, I don't mind it all that much, but we can hold it off until
something better (if anything) comes along...


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: Parav Pandit @ 2017-02-15 16:31 UTC

> -----Original Message-----
> From: Sagi Grimberg [mailto:sagi at grimberg.me]
> Sent: Wednesday, February 15, 2017 10:29 AM
> To: hch at lst.de
> Cc: Parav Pandit <parav at mellanox.com>; james.smart at broadcom.com;
> linux-nvme at lists.infradead.org
> Subject: Re: [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on
> every request.
> 
> 
> >> Yep, forgot that the sq/cq are referenced from the rsp which is
> >> per-queue.
> >>
> >> Christoph, James, can I get an ack from you guys?
> >
> > I really hate the nvmet_req_fabric_ops_init and
> > nvmet_req_link_to_nvme_queues helpers, just use direct assignments
> > instead of trivial helpers with crazy long names.
> 
> Heh, I don't mind it all that much, but we can hold it off until
> something better (if anything) comes along...

How about:
nvmet_ops_init()
nvmet_link_queues()


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: hch @ 2017-02-15 16:34 UTC

On Wed, Feb 15, 2017 at 04:31:00PM +0000, Parav Pandit wrote:
> How about 
> nvmet_ops_init()
> nvmet_link_queues()

Just do the one (respectively two) trivial assignments inline in the
callers.  That actually leads to smaller source and binary code.
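
For example, in the rdma rsp setup loop that would just be (untested
sketch of the same assignments the helpers currently wrap):

	/* in nvmet_rdma_alloc_rsps(), once per allocated rsp: */
	rsp->req.cq = &queue->nvme_cq;
	rsp->req.sq = &queue->nvme_sq;
	rsp->req.ops = &nvmet_rdma_ops;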


* [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on every request.
From: Parav Pandit @ 2017-02-15 16:54 UTC

> -----Original Message-----
> From: hch at lst.de [mailto:hch at lst.de]
> Sent: Wednesday, February 15, 2017 10:34 AM
> To: Parav Pandit <parav at mellanox.com>
> Cc: Sagi Grimberg <sagi at grimberg.me>; hch at lst.de;
> james.smart at broadcom.com; linux-nvme at lists.infradead.org
> Subject: Re: [PATCH] nvmet: Avoid writing fabric_ops, queue pointers on
> every request.
> 
> On Wed, Feb 15, 2017 at 04:31:00PM +0000, Parav Pandit wrote:
> > How about
> > nvmet_ops_init()
> > nvmet_link_queues()
> 
> Just do the one (respectively two) trivial assignments inline in the
> callers.  That actually leads to smaller source and binary code.

o.k.
