* [PATCH 0/3] Add nvme-tcp hpda support
From: Yoray Zack @ 2020-05-07 14:02 UTC
To: Sagi Grimberg, Keith Busch, Christoph Hellwig
Cc: Yoray Zack, Boris Pismenny, linux-nvme
Given the padding alignment defined by user space, the host driver
sends the requested hpda to the target, which then honors it in
NVMe/TCP C2HData capsules. The host parses received capsules
according to the padding.

The requested hpda is a 0's based value in units of dwords in the
range 0 to 31 (e.g., values 0, 1, and 2 correspond to 4-byte, 8-byte,
and 12-byte alignment).
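
As an illustration of the value-to-alignment mapping, here is a minimal
sketch in C (the helper names are ours, not part of this series):

#include <linux/types.h>
#include <linux/kernel.h>	/* roundup() */

/* hpda is 0's based, in dwords: 0 -> 4B, 1 -> 8B, ..., 31 -> 128B */
static inline u32 nvme_tcp_hpda_bytes(u8 hpda)
{
	return ((u32)hpda + 1) * 4;
}

/* Round a raw PDU data offset up to the requested alignment. */
static inline u32 nvme_tcp_align_pdo(u32 raw_pdo, u8 hpda)
{
	/* roundup(), not round_up(): 12B etc. are not powers of two */
	return roundup(raw_pdo, nvme_tcp_hpda_bytes(hpda));
}

For example, hpda = 3 requests 16-byte alignment, so a C2HData PDU with
a 24-byte header and a 4-byte header digest (raw data offset 28) would
start its data at offset 32.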
Yoray Zack (3):
nvme-fabrics: Add support for padding alignment (pda) option on tcp
nvme-tcp: Add target padding support
nvme-tcp: Add Host hpda support
drivers/nvme/host/fabrics.c | 14 ++++++++++++++
drivers/nvme/host/fabrics.h | 4 ++++
drivers/nvme/host/tcp.c | 21 +++++++++++++++++++--
drivers/nvme/target/tcp.c | 21 +++++++++++++++------
4 files changed, 52 insertions(+), 8 deletions(-)
--
1.8.3.1
* [PATCH 1/3] nvme-fabrics: Add support for padding alignment (pda) option on tcp
From: Yoray Zack @ 2020-05-07 14:02 UTC
To: Sagi Grimberg, Keith Busch, Christoph Hellwig
Cc: Yoray Zack, Boris Pismenny, linux-nvme
Consume the PDU alignment option from the nvme connect command
according to user-space directives.
Signed-off-by: Yoray Zack <yorayz@mellanox.com>
---
drivers/nvme/host/fabrics.c | 14 ++++++++++++++
drivers/nvme/host/fabrics.h | 4 ++++
2 files changed, 18 insertions(+)
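
As an illustrative usage example (address, port and NQN below are
placeholders), the new option rides in the comma-separated options
string written to /dev/nvme-fabrics:

  echo "transport=tcp,traddr=192.168.0.10,trsvcid=4420,nqn=nqn.2020-05.io.example:sub1,pda_size=3" > /dev/nvme-fabrics

Here pda_size=3 is the 0's based dword value described in the cover
letter, i.e. it requests 16-byte ((3 + 1) * 4) data alignment.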
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 2a6c819..4468b57 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -612,6 +612,7 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
{ NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
{ NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{ NVMF_OPT_TOS, "tos=%d" },
+ { NVMF_OPT_PDA, "pda_size=%d" },
{ NVMF_OPT_ERR, NULL }
};
@@ -634,6 +635,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->hdr_digest = false;
opts->data_digest = false;
opts->tos = -1; /* < 0 == use transport default */
+ opts->pda = 0; /* default no padding */
options = o = kstrdup(buf, GFP_KERNEL);
if (!options)
@@ -851,6 +853,18 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->nr_poll_queues = token;
break;
+ case NVMF_OPT_PDA:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token < 0 || token > NVMF_MAX_PDA_SIZE) {
+ pr_err("Invalid padding value ,should be 1-31 %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->pda = token;
+ break;
case NVMF_OPT_TOS:
if (match_int(args, &token)) {
ret = -EINVAL;
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a0ec40a..f1e562c 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -9,6 +9,7 @@
#include <linux/in.h>
#include <linux/inet.h>
+#define NVMF_MAX_PDA_SIZE 31
#define NVMF_MIN_QUEUE_SIZE 16
#define NVMF_MAX_QUEUE_SIZE 1024
#define NVMF_DEF_QUEUE_SIZE 128
@@ -56,6 +57,7 @@ enum {
NVMF_OPT_NR_WRITE_QUEUES = 1 << 17,
NVMF_OPT_NR_POLL_QUEUES = 1 << 18,
NVMF_OPT_TOS = 1 << 19,
+ NVMF_OPT_PDA = 1 << 20,
};
/**
@@ -89,6 +91,7 @@ enum {
* @nr_write_queues: number of queues for write I/O
* @nr_poll_queues: number of queues for polling I/O
* @tos: type of service
+ * @pda: host pdu alignment (TCP)
*/
struct nvmf_ctrl_options {
unsigned mask;
@@ -111,6 +114,7 @@ struct nvmf_ctrl_options {
unsigned int nr_write_queues;
unsigned int nr_poll_queues;
int tos;
+ int pda;
};
/*
--
1.8.3.1
* [PATCH 2/3] nvme-tcp: Add target padding support
From: Yoray Zack @ 2020-05-07 14:02 UTC
To: Sagi Grimberg, Keith Busch, Christoph Hellwig
Cc: Yoray Zack, Boris Pismenny, linux-nvme
Align the PDU data in C2HData capsules according to icreq->hpda.
Accept hpda values in the spec range (0 to 31) and reject anything
larger.
Signed-off-by: Yoray Zack <yorayz@mellanox.com>
---
drivers/nvme/target/tcp.c | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
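
For reference, a sketch of the padded C2HData PDU the target now emits
(a comment-only illustration, not code from the patch):

/*
 * C2HData PDU with hpda padding, as built by nvmet_setup_c2h_data_pdu():
 *
 *   [ hdr 24B ][ hdgst 0/4B ][ pad pda*4 B ][ data ][ ddgst 0/4B ]
 *
 * pdo and plen both grow by pda*4; the pad bytes are zero because the
 * data_pdu buffer is allocated with __GFP_ZERO and sized to include
 * them.
 */

Note that this inserts a fixed pda*4 bytes of padding after the header
rather than rounding the data offset up to the next (pda+1)*4 boundary.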
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index f0da04e..9e15785 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -104,6 +104,7 @@ struct nvmet_tcp_queue {
struct list_head free_list;
struct llist_head resp_list;
struct list_head resp_send_list;
+ int pda;
int send_list_len;
struct nvmet_tcp_cmd *snd_cmd;
@@ -219,6 +220,11 @@ static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
+static inline u8 nvmet_tcp_pda_size(struct nvmet_tcp_queue *queue)
+{
+ return queue->pda * 4;
+}
+
static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
@@ -376,6 +382,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
struct nvmet_tcp_queue *queue = cmd->queue;
u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
+ u8 pda_size = nvmet_tcp_pda_size(cmd->queue);
cmd->offset = 0;
cmd->state = NVMET_TCP_SEND_DATA_PDU;
@@ -384,9 +391,9 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
NVME_TCP_F_DATA_SUCCESS : 0);
pdu->hdr.hlen = sizeof(*pdu);
- pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
+ pdu->hdr.pdo = pdu->hdr.hlen + hdgst + pda_size;
pdu->hdr.plen =
- cpu_to_le32(pdu->hdr.hlen + hdgst +
+ cpu_to_le32(pdu->hdr.hlen + hdgst + pda_size +
cmd->req.transfer_len + ddgst);
pdu->command_id = cmd->req.cqe->command_id;
pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
@@ -505,7 +512,8 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
- int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
+ u8 pda_size = nvmet_tcp_pda_size(cmd->queue);
+ int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst + pda_size;
int ret;
ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
@@ -787,12 +795,12 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
return -EPROTO;
}
- if (icreq->hpda != 0) {
+ if (icreq->hpda > 31) { /* hpda is a 0's based dword count */
pr_err("queue %d: unsupported hpda %d\n", queue->idx,
icreq->hpda);
return -EPROTO;
}
-
+ queue->pda = icreq->hpda;
queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
if (queue->hdr_digest || queue->data_digest) {
@@ -1221,6 +1224,7 @@ static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *c)
{
u8 hdgst = nvmet_tcp_hdgst_len(queue);
+ u8 pda_size = nvmet_tcp_pda_size(queue);
c->queue = queue;
c->req.port = queue->port->nport;
@@ -1238,7 +1242,7 @@ static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
c->req.cqe = &c->rsp_pdu->cqe;
c->data_pdu = page_frag_alloc(&queue->pf_cache,
- sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
+ sizeof(*c->data_pdu) + hdgst + pda_size, GFP_KERNEL | __GFP_ZERO);
if (!c->data_pdu)
goto out_free_rsp;
--
1.8.3.1
* [PATCH 3/3] nvme-tcp: Add Host hpda support
From: Yoray Zack @ 2020-05-07 14:02 UTC
To: Sagi Grimberg, Keith Busch, Christoph Hellwig
Cc: Yoray Zack, Boris Pismenny, linux-nvme
1. Send the requested hpda to the target in the ICReq PDU.
2. Parse received C2HData capsules, skipping the PDU data alignment
padding.
Signed-off-by: Yoray Zack <yorayz@mellanox.com>
---
drivers/nvme/host/tcp.c | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
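
A sketch of the resulting host receive flow (comment-only, not code
from the patch):

/*
 * Host receive path with hpda:
 *
 *   connect:      queue->pda = nctrl->opts->pda (from pda_size=)
 *   ICReq:        icreq->hpda = queue->pda
 *   C2HData hdr:  queue->pda_remaining = queue->pda * 4
 *   data bytes:   consume pda_remaining pad bytes first, advancing
 *                 offset/len without copying, then copy the payload
 *                 into the request as before
 */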
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c15a921..42b3f06 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -81,6 +81,8 @@ struct nvme_tcp_queue {
void *pdu;
int pdu_remaining;
int pdu_offset;
+ int pda_remaining;
+ int pda;
size_t data_remaining;
size_t ddgst_remaining;
unsigned int nr_cqe;
@@ -423,6 +425,7 @@ static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
queue->pdu_offset = 0;
queue->data_remaining = -1;
queue->ddgst_remaining = 0;
+ queue->pda_remaining = 0;
}
static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
@@ -474,6 +477,7 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
}
queue->data_remaining = le32_to_cpu(pdu->data_length);
+ queue->pda_remaining = queue->pda * 4;
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
@@ -646,6 +650,17 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
struct nvme_tcp_request *req;
struct request *rq;
+ size_t recv_pda;
+
+ if (queue->pda_remaining) {
+ recv_pda = min_t(size_t, *len, queue->pda_remaining);
+ queue->pda_remaining -= recv_pda;
+ *offset += recv_pda;
+ *len -= recv_pda;
+
+ if (queue->pda_remaining)
+ return 0;
+ }
rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) {
@@ -1181,7 +1196,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
icreq->maxr2t = 0; /* single inflight r2t supported */
- icreq->hpda = 0; /* no alignment constraint */
+ icreq->hpda = queue->pda;
if (queue->hdr_digest)
icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
if (queue->data_digest)
@@ -1398,6 +1413,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
queue->data_remaining = 0;
queue->ddgst_remaining = 0;
queue->pdu_remaining = 0;
+ queue->pda_remaining = 0;
+ queue->pda = nctrl->opts->pda;
queue->pdu_offset = 0;
sk_set_memalloc(queue->sock->sk);
@@ -2464,7 +2481,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
- NVMF_OPT_TOS,
+ NVMF_OPT_TOS | NVMF_OPT_PDA,
.create_ctrl = nvme_tcp_create_ctrl,
};
--
1.8.3.1
* Re: [PATCH 0/3] Add nvme-tcp hpda support
From: Sagi Grimberg @ 2020-05-08 7:01 UTC
To: Yoray Zack, Keith Busch, Christoph Hellwig; +Cc: Boris Pismenny, linux-nvme
> Given the padding alignment defined by user space, the host driver
> sends the requested hpda to the target, which then honors it in
> NVMe/TCP C2HData capsules. The host parses received capsules
> according to the padding.
>
> The requested hpda is a 0's based value in units of dwords in the
> range 0 to 31 (e.g., values 0, 1, and 2 correspond to 4-byte, 8-byte,
> and 12-byte alignment).
Thanks Yoray for the contribution!
Can you share a bit on the use case you are adding this for? Curious to
learn about it.
* Re: [PATCH 1/3] nvme-fabrics: Add support for padding alignment (pda) option on tcp
From: Sagi Grimberg @ 2020-05-08 7:06 UTC
To: Yoray Zack, Keith Busch, Christoph Hellwig; +Cc: Boris Pismenny, linux-nvme
> Consume the PDU alignment option from the nvme connect command
> according to user-space directives.
>
> Signed-off-by: Yoray Zack <yorayz@mellanox.com>
> ---
> drivers/nvme/host/fabrics.c | 14 ++++++++++++++
> drivers/nvme/host/fabrics.h | 4 ++++
> 2 files changed, 18 insertions(+)
>
> diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
> index 2a6c819..4468b57 100644
> --- a/drivers/nvme/host/fabrics.c
> +++ b/drivers/nvme/host/fabrics.c
> @@ -612,6 +612,7 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
> { NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
> { NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
> { NVMF_OPT_TOS, "tos=%d" },
> + { NVMF_OPT_PDA, "pda_size=%d" },
This is very specific to nvme-tcp...
We are gradually making the fabrics arguments span transport-specific
stuff. I'm wondering if we should have a cleaner approach to
this...
* RE: [PATCH 0/3] Add nvme-tcp hpda support
From: Yoray Zack @ 2020-05-08 9:56 UTC
To: Sagi Grimberg, Keith Busch, Christoph Hellwig; +Cc: Boris Pismenny, linux-nvme
> -----Original Message-----
> From: Sagi Grimberg <sagi@grimberg.me>
> Sent: Friday, May 8, 2020 10:02 AM
> To: Yoray Zack <yorayz@mellanox.com>; Keith Busch <kbusch@kernel.org>;
> Christoph Hellwig <hch@lst.de>
> Cc: Boris Pismenny <borisp@mellanox.com>; linux-nvme@lists.infradead.org
> Subject: Re: [PATCH 0/3] Add nvme-tcp hpda support
>
> > Given the padding alignment defined by user space, the host driver
> > sends the requested hpda to the target, which then honors it in
> > NVMe/TCP C2HData capsules. The host parses received capsules
> > according to the padding.
> >
> > The requested hpda is a 0's based value in units of dwords in the
> > range 0 to 31 (e.g., values 0, 1, and 2 correspond to 4-byte, 8-byte,
> > and 12-byte alignment).
>
> Thanks Yoray for the contribution!
>
> Can you share a bit on the use case you are adding this for? Curious to learn
> about it.
Hi Sagi,
I am adding these changes because:
A. This is part of the NVMe over TCP spec.
B. We are testing future hardware that needs to know how to handle it.
* RE: [PATCH 1/3] nvme-fabrics: Add support for padding alignment (pda) option on tcp
From: Yoray Zack @ 2020-05-08 9:58 UTC
To: Sagi Grimberg, Keith Busch, Christoph Hellwig; +Cc: Boris Pismenny, linux-nvme
> -----Original Message-----
> From: Sagi Grimberg <sagi@grimberg.me>
> Sent: Friday, May 8, 2020 10:06 AM
> To: Yoray Zack <yorayz@mellanox.com>; Keith Busch <kbusch@kernel.org>;
> Christoph Hellwig <hch@lst.de>
> Cc: Boris Pismenny <borisp@mellanox.com>; linux-nvme@lists.infradead.org
> Subject: Re: [PATCH 1/3] nvme-fabrics: Add support for padding alignment
> (pda) option on tcp
>
>
> > Consume the PDU alignment option from the nvme connect command
> > according to user-space directives.
> >
> > Signed-off-by: Yoray Zack <yorayz@mellanox.com>
> > ---
> > drivers/nvme/host/fabrics.c | 14 ++++++++++++++
> > drivers/nvme/host/fabrics.h | 4 ++++
> > 2 files changed, 18 insertions(+)
> >
> > diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
> > index 2a6c819..4468b57 100644
> > --- a/drivers/nvme/host/fabrics.c
> > +++ b/drivers/nvme/host/fabrics.c
> > @@ -612,6 +612,7 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
> > { NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
> > { NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
> > { NVMF_OPT_TOS, "tos=%d" },
> > + { NVMF_OPT_PDA, "pda_size=%d" },
>
> This is very specific to nvme-tcp...
>
> We are gradually making the fabrics arguments span transport-specific stuff.
> I'm wondering if we should have a cleaner approach to this...
Yes, this is specific to nvme-tcp.
But we added it there because there are already other nvme-tcp-specific
parameters there (such as data/header digest).
We agree that there is room for improvement, but that is separate
infrastructure work.
* Re: [PATCH 1/3] nvme-fabrics: Add support for padding alignment (pda) option on tcp
From: Sagi Grimberg @ 2020-05-08 14:54 UTC
To: Yoray Zack, Keith Busch, Christoph Hellwig; +Cc: Boris Pismenny, linux-nvme
>>> diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
>>> index 2a6c819..4468b57 100644
>>> --- a/drivers/nvme/host/fabrics.c
>>> +++ b/drivers/nvme/host/fabrics.c
>>> @@ -612,6 +612,7 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
>>> { NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
>>> { NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
>>> { NVMF_OPT_TOS, "tos=%d" },
>>> + { NVMF_OPT_PDA, "pda_size=%d" },
>>
>> This is very specific to nvme-tcp...
>>
>> We are gradually making the fabrics arguments span transport-specific stuff.
>> I'm wondering if we should have a cleaner approach to this...
>
> Yes, this is specific to nvme-tcp.
>
> But we added it there because there are already other nvme-tcp-specific
> parameters there (such as data/header digest).
Yes, the digest pieces could at least in theory become applicable to
other transports by extension.
> We agree that there is room for improvement, but that is separate
> infrastructure work.
Which should probably happen before we add user-facing functionality,
if we need to figure it out, especially if there is no pressing need
to get this in fast.
* Re: [PATCH 0/3] Add nvme-tcp hpda support
From: Sagi Grimberg @ 2020-05-08 15:00 UTC
To: Yoray Zack, Keith Busch, Christoph Hellwig; +Cc: Boris Pismenny, linux-nvme
>>> Given the padding alignment defined by user space, the host driver
>>> sends the requested hpda to the target, which then honors it in
>>> NVMe/TCP C2HData capsules. The host parses received capsules
>>> according to the padding.
>>>
>>> The requested hpda is a 0's based value in units of dwords in the
>>> range 0 to 31 (e.g., values 0, 1, and 2 correspond to 4-byte, 8-byte,
>>> and 12-byte alignment).
>>
>> Thanks Yoray for the contribution!
>>
>> Can you share a bit on the use case you are adding this for? Curious to learn
>> about it.
>
> Hi Sagi,
>
> I am adding these changes because:
>
> A. This is part of the NVMe over TCP spec.
There are quite a few things in the variety of the nvme specs that we
implement; we usually want a reason to add one. Not against it, but I
would like additions to come from a real need.
> B. We are testing future hardware that needs to know how to handle it.
OK, but is it required by the device?