From: Boris Pismenny <borisp@mellanox.com>
To: kuba@kernel.org, davem@davemloft.net, saeedm@nvidia.com,
hch@lst.de, sagi@grimberg.me, axboe@fb.com, kbusch@kernel.org,
viro@zeniv.linux.org.uk, edumazet@google.com
Cc: boris.pismenny@gmail.com, linux-nvme@lists.infradead.org,
netdev@vger.kernel.org, benishay@nvidia.com, ogerlitz@nvidia.com,
yorayz@nvidia.com, Ben Ben-Ishay <benishay@mellanox.com>,
Or Gerlitz <ogerlitz@mellanox.com>,
Yoray Zack <yorayz@mellanox.com>
Subject: [PATCH v1 net-next 06/15] nvme-tcp: Add DDP data-path
Date: Mon, 7 Dec 2020 23:06:40 +0200
Message-ID: <20201207210649.19194-7-borisp@mellanox.com>
In-Reply-To: <20201207210649.19194-1-borisp@mellanox.com>
Introduce the NVMe-TCP DDP data-path offload.
Using this interface, the NIC hardware will scatter TCP payload directly
to the BIO pages according to the command_id in the PDU.
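The per-request handle for this mapping is struct tcp_ddp_io, introduced
in patch 02/15 of this series; abridged here to just the fields this
patch uses:

	/* Abridged sketch; see "net: Introduce direct data placement
	 * tcp offload" (patch 02/15) for the full definition. */
	struct tcp_ddp_io {
		u32			command_id;	/* matches the NVMe PDU */
		int			nents;		/* mapped SG entries */
		struct sg_table		sg_table;	/* destination BIO pages */
		struct scatterlist	first_sgl[SG_CHUNK_SIZE];
	};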
To maintain the correctness of the network stack, the driver is expected
to construct SKBs that point to the BIO pages.
The data-path interface consists of two routines: tcp_ddp_setup and
tcp_ddp_teardown. The setup routine installs the mapping from command_id
to the request buffers, while the teardown routine removes it.
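Condensed from nvme_tcp_setup_ddp() in the diff below (error handling
elided), the setup half maps the request into an SGL and hands the
command_id -> SGL pair to the netdev:

	req->ddp.command_id = command_id;
	req->ddp.sg_table.sgl = req->ddp.first_sgl;
	sg_alloc_table_chained(&req->ddp.sg_table,
			       blk_rq_nr_phys_segments(rq),
			       req->ddp.sg_table.sgl, SG_CHUNK_SIZE);
	req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);

	/* From here on, HW may place payload arriving for this
	 * command_id directly into the BIO pages. */
	netdev->tcp_ddp_ops->tcp_ddp_setup(netdev, queue->sock->sk, &req->ddp);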
For efficiency, we introduce an asynchronous NVMe completion that is
split between NVMe-TCP and the NIC driver: NVMe-TCP performs the
NVMe-specific completion, while the NIC driver performs the generic
blk_mq completion.
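Condensed from the completion paths in the diff below, the split looks
like this:

	/* NVMe-TCP side: for an offloaded request, stash the
	 * NVMe-specific result and kick off the async teardown instead
	 * of completing inline. */
	req->status = cqe->status;
	req->result = cqe->result;
	nvme_tcp_teardown_ddp(queue, cqe->command_id, rq);

	/* NIC driver side: once HW has invalidated the mapping, it
	 * calls back via .ddp_teardown_done, which performs the
	 * deferred blk_mq completion. */
	void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
	{
		struct request *rq = ddp_ctx;
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		if (!nvme_try_complete_req(rq, req->status, req->result))
			nvme_complete_rq(rq);
	}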
Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Ben Ben-Ishay <benishay@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Yoray Zack <yorayz@mellanox.com>
---
drivers/nvme/host/tcp.c | 119 ++++++++++++++++++++++++++++++++++++++--
1 file changed, 115 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index ef96e4a02bbd..534fd5c00f33 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -57,6 +57,11 @@ struct nvme_tcp_request {
size_t offset;
size_t data_sent;
enum nvme_tcp_send_state state;
+
+ bool offloaded;
+ struct tcp_ddp_io ddp;
+ __le16 status;
+ union nvme_result result;
};
enum nvme_tcp_queue_flags {
@@ -231,10 +236,74 @@ static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
#ifdef CONFIG_TCP_DDP
bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
+void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
const struct tcp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
.resync_request = nvme_tcp_resync_request,
+ .ddp_teardown_done = nvme_tcp_ddp_teardown_done,
};
+static
+int nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
+ u16 command_id,
+ struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct net_device *netdev = queue->ctrl->offloading_netdev;
+ int ret;
+
+ if (unlikely(!netdev)) {
+ pr_info_ratelimited("%s: netdev not found\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = netdev->tcp_ddp_ops->tcp_ddp_teardown(netdev, queue->sock->sk,
+ &req->ddp, rq);
+ sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
+ req->offloaded = false;
+ return ret;
+}
+
+void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
+{
+ struct request *rq = ddp_ctx;
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+	if (!nvme_try_complete_req(rq, req->status, req->result))
+ nvme_complete_rq(rq);
+}
+
+static
+int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
+ u16 command_id,
+ struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct net_device *netdev = queue->ctrl->offloading_netdev;
+ int ret;
+
+ req->offloaded = false;
+
+ if (unlikely(!netdev)) {
+ pr_info_ratelimited("%s: netdev not found\n", __func__);
+ return -EINVAL;
+ }
+
+ req->ddp.command_id = command_id;
+ req->ddp.sg_table.sgl = req->ddp.first_sgl;
+ ret = sg_alloc_table_chained(&req->ddp.sg_table, blk_rq_nr_phys_segments(rq),
+ req->ddp.sg_table.sgl, SG_CHUNK_SIZE);
+ if (ret)
+ return -ENOMEM;
+ req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);
+
+ ret = netdev->tcp_ddp_ops->tcp_ddp_setup(netdev,
+ queue->sock->sk,
+ &req->ddp);
+ if (!ret)
+ req->offloaded = true;
+ return ret;
+}
+
static
int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
{
@@ -374,6 +443,25 @@ bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags)
#else
+static
+int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
+ u16 command_id,
+ struct request *rq)
+{
+ return -EINVAL;
+}
+
+static
+int nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
+ u16 command_id,
+ struct request *rq)
+{
+ return -EINVAL;
+}
+
+void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
+{}
+
static
int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
{
@@ -651,6 +739,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
struct nvme_completion *cqe)
{
+ struct nvme_tcp_request *req;
struct request *rq;
rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
@@ -662,8 +751,15 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
return -EINVAL;
}
- if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
- nvme_complete_rq(rq);
+ req = blk_mq_rq_to_pdu(rq);
+ if (req->offloaded) {
+ req->status = cqe->status;
+ req->result = cqe->result;
+ nvme_tcp_teardown_ddp(queue, cqe->command_id, rq);
+ } else {
+ if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
+ nvme_complete_rq(rq);
+ }
queue->nr_cqe++;
return 0;
@@ -857,9 +953,18 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
union nvme_result res = {};
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_queue *queue = req->queue;
+ struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
- if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
- nvme_complete_rq(rq);
+ if (req->offloaded) {
+ req->status = cpu_to_le16(status << 1);
+ req->result = res;
+ nvme_tcp_teardown_ddp(queue, pdu->command_id, rq);
+ } else {
+ if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
+ nvme_complete_rq(rq);
+ }
}
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
@@ -1135,6 +1240,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
bool inline_data = nvme_tcp_has_inline_data(req);
u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) + hdgst - req->offset;
+ struct request *rq = blk_mq_rq_from_pdu(req);
int flags = MSG_DONTWAIT;
int ret;
@@ -1143,6 +1249,10 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
else
flags |= MSG_EOR;
+	if (!req->offset && test_bit(NVME_TCP_Q_OFFLOADS, &queue->flags) &&
+	    blk_rq_nr_phys_segments(rq) && rq_data_dir(rq) == READ)
+		nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id, rq);
+
if (queue->hdr_digest && !req->offset)
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
@@ -2445,6 +2555,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
req->data_len = blk_rq_nr_phys_segments(rq) ?
blk_rq_payload_bytes(rq) : 0;
req->curr_bio = rq->bio;
+ req->offloaded = false;
if (rq_data_dir(rq) == WRITE &&
req->data_len <= nvme_tcp_inline_data_size(queue))
--
2.24.1