* [PATCH v4] nvme-tcp: send H2CData PDUs based on MAXH2CDATA
From: Varun Prakash @ 2022-01-22 16:57 UTC
  To: sagi, hch, kbusch; +Cc: linux-nvme, varun

As per the NVMe/TCP specification (revision 1.0a, section 3.6.2.3),
Maximum Host to Controller Data length (MAXH2CDATA): Specifies the
maximum number of PDU-Data bytes per H2CData PDU in bytes. This value
is a multiple of dwords and should be no less than 4,096.

The current code sets the H2CData PDU data_length to r2t_length
without checking the MAXH2CDATA value. Fix this by setting the
H2CData PDU data_length to min(req->h2cdata_left, queue->maxh2cdata).

Also validate the MAXH2CDATA value returned by the target in the
ICResp PDU; if it is not a multiple of dwords or is less than 4096,
return -EINVAL from nvme_tcp_init_connection().
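
To make the splitting concrete, here is a small standalone userspace
sketch (illustration only, not part of the patch; split_r2t() and its
output format are invented for this example) of how one R2T is covered
by a sequence of H2CData PDUs once MAXH2CDATA is honored:

#include <stdio.h>
#include <stdint.h>

#define NVME_TCP_MIN_MAXH2CDATA 4096

/* Mirror of the ICResp validation: dword multiple, >= 4096. */
static int maxh2cdata_valid(uint32_t maxh2cdata)
{
	return !(maxh2cdata % 4) && maxh2cdata >= NVME_TCP_MIN_MAXH2CDATA;
}

static int split_r2t(uint32_t r2t_offset, uint32_t r2t_length,
		     uint32_t maxh2cdata)
{
	uint32_t left = r2t_length, offset = r2t_offset;

	if (!maxh2cdata_valid(maxh2cdata))
		return -1;

	while (left) {
		/* Same arithmetic as nvme_tcp_setup_h2c_data_pdu(). */
		uint32_t len = left < maxh2cdata ? left : maxh2cdata;

		left -= len;
		printf("H2CData PDU: offset=%u length=%u%s\n",
		       offset, len, left ? "" : " (LAST_PDU)");
		offset += len;
	}
	return 0;
}

int main(void)
{
	/* A 64K R2T against a target advertising MAXH2CDATA of 16K
	 * yields four H2CData PDUs; only the last one is marked last. */
	return split_r2t(0, 65536, 16384);
}

Only the final PDU of the sequence carries the "last" marker, which
matches the conditional NVME_TCP_F_DATA_LAST handling in the patch
below.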

Signed-off-by: Varun Prakash <varun@chelsio.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
---

v4:
- removed MSG_SENDPAGE_NOTLAST flag in sock_no_sendpage() case

v3:
- added h2cdata_left, h2cdata_offset
- removed unnecessary local variables from nvme_tcp_try_send_data_pdu()

v2:
- removed nvme_tcp_update_h2c_data_pdu()
- used sock_no_sendpage() instead of kernel_sendmsg()

 drivers/nvme/host/tcp.c  | 63 +++++++++++++++++++++++++++++++++++++-----------
 include/linux/nvme-tcp.h |  1 +
 2 files changed, 50 insertions(+), 14 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 4ceb286..c0fff72 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -44,6 +44,8 @@ struct nvme_tcp_request {
 	u32			data_len;
 	u32			pdu_len;
 	u32			pdu_sent;
+	u32			h2cdata_left;
+	u32			h2cdata_offset;
 	u16			ttag;
 	__le16			status;
 	struct list_head	entry;
@@ -95,6 +97,7 @@ struct nvme_tcp_queue {
 	struct nvme_tcp_request *request;
 
 	int			queue_size;
+	u32			maxh2cdata;
 	size_t			cmnd_capsule_len;
 	struct nvme_tcp_ctrl	*ctrl;
 	unsigned long		flags;
@@ -572,23 +575,26 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
 	return ret;
 }
 
-static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
-		struct nvme_tcp_r2t_pdu *pdu)
+static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_data_pdu *data = req->pdu;
 	struct nvme_tcp_queue *queue = req->queue;
 	struct request *rq = blk_mq_rq_from_pdu(req);
+	u32 h2cdata_sent = req->pdu_len;
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
 	u8 ddgst = nvme_tcp_ddgst_len(queue);
 
 	req->state = NVME_TCP_SEND_H2C_PDU;
 	req->offset = 0;
-	req->pdu_len = le32_to_cpu(pdu->r2t_length);
+	req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
 	req->pdu_sent = 0;
+	req->h2cdata_left -= req->pdu_len;
+	req->h2cdata_offset += h2cdata_sent;
 
 	memset(data, 0, sizeof(*data));
 	data->hdr.type = nvme_tcp_h2c_data;
-	data->hdr.flags = NVME_TCP_F_DATA_LAST;
+	if (!req->h2cdata_left)
+		data->hdr.flags = NVME_TCP_F_DATA_LAST;
 	if (queue->hdr_digest)
 		data->hdr.flags |= NVME_TCP_F_HDGST;
 	if (queue->data_digest)
@@ -597,9 +603,9 @@ static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
 	data->hdr.pdo = data->hdr.hlen + hdgst;
 	data->hdr.plen =
 		cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
-	data->ttag = pdu->ttag;
+	data->ttag = req->ttag;
 	data->command_id = nvme_cid(rq);
-	data->data_offset = pdu->r2t_offset;
+	data->data_offset = cpu_to_le32(req->h2cdata_offset);
 	data->data_length = cpu_to_le32(req->pdu_len);
 }
 
@@ -609,6 +615,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
 	struct nvme_tcp_request *req;
 	struct request *rq;
 	u32 r2t_length = le32_to_cpu(pdu->r2t_length);
+	u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
 
 	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
 	if (!rq) {
@@ -633,14 +640,19 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
 		return -EPROTO;
 	}
 
-	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
+	if (unlikely(r2t_offset < req->data_sent)) {
 		dev_err(queue->ctrl->ctrl.device,
 			"req %d unexpected r2t offset %u (expected %zu)\n",
-			rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
+			rq->tag, r2t_offset, req->data_sent);
 		return -EPROTO;
 	}
 
-	nvme_tcp_setup_h2c_data_pdu(req, pdu);
+	req->pdu_len = 0;
+	req->h2cdata_left = r2t_length;
+	req->h2cdata_offset = r2t_offset;
+	req->ttag = pdu->ttag;
+
+	nvme_tcp_setup_h2c_data_pdu(req);
 	nvme_tcp_queue_request(req, false, true);
 
 	return 0;
@@ -920,6 +932,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
 	int req_data_len = req->data_len;
+	u32 h2cdata_left = req->h2cdata_left;
 
 	while (true) {
 		struct page *page = nvme_tcp_req_cur_page(req);
@@ -964,7 +977,10 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 				req->state = NVME_TCP_SEND_DDGST;
 				req->offset = 0;
 			} else {
-				nvme_tcp_done_send_req(queue);
+				if (h2cdata_left)
+					nvme_tcp_setup_h2c_data_pdu(req);
+				else
+					nvme_tcp_done_send_req(queue);
 			}
 			return 1;
 		}
@@ -1022,9 +1038,14 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 	if (queue->hdr_digest && !req->offset)
 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
 
-	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
-			offset_in_page(pdu) + req->offset, len,
-			MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+	if (!req->h2cdata_left)
+		ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
+				offset_in_page(pdu) + req->offset, len,
+				MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+	else
+		ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
+				offset_in_page(pdu) + req->offset, len,
+				MSG_DONTWAIT | MSG_MORE);
 	if (unlikely(ret <= 0))
 		return ret;
 
@@ -1044,6 +1065,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
 	size_t offset = req->offset;
+	u32 h2cdata_left = req->h2cdata_left;
 	int ret;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 	struct kvec iov = {
@@ -1061,7 +1083,10 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 		return ret;
 
 	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
-		nvme_tcp_done_send_req(queue);
+		if (h2cdata_left)
+			nvme_tcp_setup_h2c_data_pdu(req);
+		else
+			nvme_tcp_done_send_req(queue);
 		return 1;
 	}
 
@@ -1253,6 +1278,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
 	struct msghdr msg = {};
 	struct kvec iov;
 	bool ctrl_hdgst, ctrl_ddgst;
+	u32 maxh2cdata;
 	int ret;
 
 	icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
@@ -1336,6 +1362,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
 		goto free_icresp;
 	}
 
+	maxh2cdata = le32_to_cpu(icresp->maxdata);
+	if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
+		pr_err("queue %d: invalid maxh2cdata returned %u\n",
+		       nvme_tcp_queue_id(queue), maxh2cdata);
+		goto free_icresp;
+	}
+	queue->maxh2cdata = maxh2cdata;
+
 	ret = 0;
 free_icresp:
 	kfree(icresp);
@@ -2320,6 +2354,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	req->data_sent = 0;
 	req->pdu_len = 0;
 	req->pdu_sent = 0;
+	req->h2cdata_left = 0;
 	req->data_len = blk_rq_nr_phys_segments(rq) ?
 				blk_rq_payload_bytes(rq) : 0;
 	req->curr_bio = rq->bio;
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
index 959e0bd..7547015 100644
--- a/include/linux/nvme-tcp.h
+++ b/include/linux/nvme-tcp.h
@@ -12,6 +12,7 @@
 #define NVME_TCP_DISC_PORT	8009
 #define NVME_TCP_ADMIN_CCSZ	SZ_8K
 #define NVME_TCP_DIGEST_LENGTH	4
+#define NVME_TCP_MIN_MAXH2CDATA 4096
 
 enum nvme_tcp_pfv {
 	NVME_TCP_PFV_1_0 = 0x0,
-- 
2.0.2




* Re: [PATCH v4] nvme-tcp: send H2CData PDUs based on MAXH2CDATA
From: Sagi Grimberg @ 2022-01-23  9:16 UTC
  To: Varun Prakash, hch, kbusch; +Cc: linux-nvme

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>



* Re: [PATCH v4] nvme-tcp: send H2CData PDUs based on MAXH2CDATA
From: Sagi Grimberg @ 2022-01-24 21:11 UTC
  To: Varun Prakash, hch, kbusch, Ran.Anner; +Cc: linux-nvme


> Reviewed-by: Sagi Grimberg <sagi@grimberg.me>

Let's wait with this for a moment..

Keith,
Is it possible that you guys take this for a test run?
This is the area that exposed a few issues in the past
working against your target device...

Ran, maybe you can run it against your target as well?

Would love to get your tested-by/acked-by tags.



* Re: [PATCH v4] nvme-tcp: send H2CData PDUs based on MAXH2CDATA
From: Sagi Grimberg @ 2022-02-20 13:05 UTC
  To: Varun Prakash, hch, kbusch, Ran.Anner; +Cc: linux-nvme


>> Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
> 
> Let's wait with this for a moment..
> 
> Keith,
> Is it possible that you guys take this for a test run?
> This is the area that exposed a few issues in the past
> working against your target device...
> 
> Ran, maybe you can run it against your target as well?
> 
> Would love to get your tested-by/acked-by tags.

I think we can move forward with this patch. If this turns
out to cause any issues we can address those as they come...



* Re: [PATCH v4] nvme-tcp: send H2CData PDUs based on MAXH2CDATA
From: Christoph Hellwig @ 2022-02-22 17:18 UTC
  To: Sagi Grimberg; +Cc: Varun Prakash, hch, kbusch, Ran.Anner, linux-nvme

On Sun, Feb 20, 2022 at 03:05:18PM +0200, Sagi Grimberg wrote:
>
>>> Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
>>
>> Let's wait with this for a moment..
>>
>> Keith,
>> Is it possible that you guys take this for a test run?
>> This is the area that exposed a few issues in the past
>> working against your target device...
>>
>> Ran, maybe you can run it against your target as well?
>>
>> Would love to get your tested-by/acked-by tags.
>
> I think we can move forward with this patch. If this turns
> out to cause any issues we can address those as they come...

Is this a Reviewed-by?



* Re: [PATCH v4] nvme-tcp: send H2CData PDUs based on MAXH2CDATA
From: Christoph Hellwig @ 2022-02-22 17:19 UTC
  To: Sagi Grimberg; +Cc: Varun Prakash, hch, kbusch, Ran.Anner, linux-nvme

On Tue, Feb 22, 2022 at 06:18:26PM +0100, Christoph Hellwig wrote:
> > out to cause any issues we can address those as they come...
> 
> Is this a Reviewed-by?

Ah, we already had one.



* Re: [PATCH v4] nvme-tcp: send H2CData PDUs based on MAXH2CDATA
From: Keith Busch @ 2022-03-01 22:56 UTC
  To: Sagi Grimberg; +Cc: Varun Prakash, hch, Ran.Anner, linux-nvme

On Sun, Feb 20, 2022 at 03:05:18PM +0200, Sagi Grimberg wrote:
> 
> > > Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
> > 
> > Let's wait with this for a moment..
> > 
> > Keith,
> > Is it possible that you guys take this for a test run?
> > This is the area that exposed a few issues in the past
> > working against your target device...
> > 
> > Ran, maybe you can run it against your target as well?
> > 
> > Would love to get your tested-by/acked-by tags.
> 
> I think we can move forward with this patch. If this turns
> out to cause any issues we can address those as they come...

Hmm, it looks like this triggers the below warning on a disconnect test.

The warning seems to indicate the srcu lock/unlock counts are not
balanced. That doesn't immediately make sense because the queue freeze
should have completed before this, which should have the same
enter/exit calls.

I didn't find anything obvious for how *this* patch triggers it, but the
testers say they haven't been able to recreate without the patch, so I'm
letting you know now.

---

[Tue Mar  1 13:13:19 2022] nvme nvme0: Removing ctrl: NQN "nqn.2015-09.com.wdc:nvme.1"
[Tue Mar  1 13:13:19 2022] ------------[ cut here ]------------
[Tue Mar  1 13:13:19 2022] WARNING: CPU: 5 PID: 41414 at kernel/rcu/srcutree.c:373 cleanup_srcu_struct+0xe8/0xf0
[Tue Mar  1 13:13:19 2022] Modules linked in: nvme_tcp(OE) nvme_rdma nvme_fabrics nvme nvme_core ib_umad rdma_ucm rdma_cm iw_cm ib_cm snd_seq_dummy snd_hrtimer snd_seq_midi snd_seq_midi_event snd_rawmidi snd_seq snd_seq_device snd_timer snd soundcore cmac nls_utf8 cifs cifs_arc4 cifs_md4 fscache netfs cuse ipmi_ssif binfmt_misc nls_iso8859_1 intel_rapl_msr intel_rapl_common sb_edac x86_pkg_temp_thermal intel_powerclamp coretemp drm_vram_helper crct10dif_pclmul ghash_clmulni_intel drm_ttm_helper aesni_intel ttm crypto_simd drm_kms_helper cryptd cec rc_core fb_sys_fops rapl syscopyarea sysfillrect joydev input_leds sysimgblt intel_cstate efi_pstore mei_me ioatdma mei acpi_ipmi ipmi_si acpi_power_meter acpi_pad mac_hid sch_fq_codel ipmi_devintf ipmi_msghandler msr parport_pc ppdev lp parport drm ip_tables x_tables autofs4 mlx5_ib ib_uverbs ib_core hid_generic usbhid hid mlx5_core crc32_pclmul igb mlxfw psample i2c_i801 i2c_algo_bit ahci i2c_smbus lpc_ich xhci_pci dca tls libahci xhci_pci_renesas
[Tue Mar  1 13:13:19 2022]  pci_hyperv_intf wmi
[Tue Mar  1 13:13:19 2022] CPU: 5 PID: 41414 Comm: nvme Tainted: G           OE     5.17.0-051700rc4-generic #202202132130
[Tue Mar  1 13:13:19 2022] Hardware name: Supermicro SYS-5018R-WR/X10SRW-F, BIOS 2.0a 08/02/2016
[Tue Mar  1 13:13:19 2022] RIP: 0010:cleanup_srcu_struct+0xe8/0xf0
[Tue Mar  1 13:13:19 2022] Code: ff 84 c0 0f 85 f9 45 b0 00 49 8b bd f0 c3 00 00 e8 cd 37 17 00 49 c7 85 f0 c3 00 00 00 00 00 00 5b 41 5c 41 5d 5d c3 0f 0b c3 <0f> 0b eb b5 0f 0b 66 90 0f 1f 44 00 00 48 8b 7f 28 f0 ff 8f 40 c4
[Tue Mar  1 13:13:19 2022] RSP: 0018:ffff97844b7cfac8 EFLAGS: 00010202
[Tue Mar  1 13:13:19 2022] RAX: 0000000000000001 RBX: ffff888575920080 RCX: 0000000000000010
[Tue Mar  1 13:13:19 2022] RDX: 0000000000000010 RSI: 0000000000000000 RDI: 0000000000000010
[Tue Mar  1 13:13:19 2022] RBP: ffff97844b7cfae0 R08: 0000000000000000 R09: 0000000000000000
[Tue Mar  1 13:13:19 2022] R10: 0000000000000010 R11: 0000000000000000 R12: ffff888575920000
[Tue Mar  1 13:13:19 2022] R13: ffff888575920578 R14: 0000000000000000 R15: ffff888512eecfc0
[Tue Mar  1 13:13:19 2022] FS:  00007f010e45c740(0000) GS:ffff888c5fb40000(0000) knlGS:0000000000000000
[Tue Mar  1 13:13:19 2022] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[Tue Mar  1 13:13:19 2022] CR2: 00007ffe64848e98 CR3: 00000002765f4002 CR4: 00000000003706e0
[Tue Mar  1 13:13:19 2022] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[Tue Mar  1 13:13:19 2022] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[Tue Mar  1 13:13:19 2022] Call Trace:
[Tue Mar  1 13:13:19 2022]  <TASK>
[Tue Mar  1 13:13:19 2022]  blk_release_queue+0x10d/0x110
[Tue Mar  1 13:13:19 2022]  kobject_cleanup+0x41/0x140
[Tue Mar  1 13:13:19 2022]  kobject_put+0x53/0x70
[Tue Mar  1 13:13:19 2022]  blk_put_queue+0x12/0x20
[Tue Mar  1 13:13:19 2022]  disk_release+0x63/0x80
[Tue Mar  1 13:13:19 2022]  device_release+0x3b/0xa0
[Tue Mar  1 13:13:19 2022]  kobject_cleanup+0x41/0x140
[Tue Mar  1 13:13:19 2022]  kobject_put+0x53/0x70
[Tue Mar  1 13:13:19 2022]  put_device+0x13/0x20
[Tue Mar  1 13:13:19 2022]  put_disk+0x1b/0x20
[Tue Mar  1 13:13:19 2022]  nvme_free_ns+0x28/0x160 [nvme_core]
[Tue Mar  1 13:13:19 2022]  nvme_ns_remove+0x124/0x1b0 [nvme_core]
[Tue Mar  1 13:13:19 2022]  nvme_remove_namespaces+0xb0/0xf0 [nvme_core]
[Tue Mar  1 13:13:19 2022]  nvme_do_delete_ctrl+0x5d/0x7a [nvme_core]
[Tue Mar  1 13:13:19 2022]  nvme_sysfs_delete.cold+0x8/0xd [nvme_core]
[Tue Mar  1 13:13:19 2022]  dev_attr_store+0x17/0x30
[Tue Mar  1 13:13:19 2022]  sysfs_kf_write+0x3e/0x50
[Tue Mar  1 13:13:19 2022]  kernfs_fop_write_iter+0x137/0x1c0
[Tue Mar  1 13:13:19 2022]  new_sync_write+0x117/0x1a0
[Tue Mar  1 13:13:19 2022]  ? get_kcore_size+0xf0/0xf0
[Tue Mar  1 13:13:19 2022]  vfs_write+0x1f3/0x290
[Tue Mar  1 13:13:19 2022]  ksys_write+0x67/0xe0
[Tue Mar  1 13:13:19 2022]  __x64_sys_write+0x19/0x20
[Tue Mar  1 13:13:19 2022]  do_syscall_64+0x5c/0xc0
[Tue Mar  1 13:13:19 2022]  ? do_sys_openat2+0x87/0x160
[Tue Mar  1 13:13:19 2022]  ? exit_to_user_mode_prepare+0x37/0xb0
[Tue Mar  1 13:13:19 2022]  ? syscall_exit_to_user_mode+0x27/0x50
[Tue Mar  1 13:13:19 2022]  ? do_syscall_64+0x69/0xc0
[Tue Mar  1 13:13:19 2022]  ? exit_to_user_mode_prepare+0x37/0xb0
[Tue Mar  1 13:13:19 2022]  ? irqentry_exit_to_user_mode+0x9/0x20
[Tue Mar  1 13:13:19 2022]  ? irqentry_exit+0x33/0x40
[Tue Mar  1 13:13:19 2022]  ? exc_page_fault+0x89/0x180
[Tue Mar  1 13:13:19 2022]  ? asm_exc_page_fault+0x8/0x30
[Tue Mar  1 13:13:19 2022]  entry_SYSCALL_64_after_hwframe+0x44/0xae
[Tue Mar  1 13:13:19 2022] RIP: 0033:0x7f010e577a37
[Tue Mar  1 13:13:19 2022] Code: 0f 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 48 89 54 24 18 48 89 74 24
[Tue Mar  1 13:13:19 2022] RSP: 002b:00007ffdd3c13538 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
[Tue Mar  1 13:13:19 2022] RAX: ffffffffffffffda RBX: 0000000000000037 RCX: 00007f010e577a37
[Tue Mar  1 13:13:19 2022] RDX: 0000000000000001 RSI: 0000557cd45d4e8e RDI: 0000000000000003
[Tue Mar  1 13:13:19 2022] RBP: 0000000000000003 R08: 0000557cd5dcd4a0 R09: 0000557cd5dcd510
[Tue Mar  1 13:13:19 2022] R10: 0000000000000000 R11: 0000000000000246 R12: 0000557cd5dcd510
[Tue Mar  1 13:13:19 2022] R13: 0000557cd45ed060 R14: 0000000000000000 R15: 0000000000000003
[Tue Mar  1 13:13:19 2022]  </TASK>
[Tue Mar  1 13:13:19 2022] ---[ end trace 0000000000000000 ]---




* Re: [PATCH v4] nvme-tcp: send H2CData PDUs based on MAXH2CDATA
From: Chaitanya Kulkarni @ 2022-03-02  1:50 UTC
  To: Keith Busch; +Cc: Varun Prakash, Sagi Grimberg, hch, Ran.Anner, linux-nvme

Keith,

On 3/1/22 2:56 PM, Keith Busch wrote:
> On Sun, Feb 20, 2022 at 03:05:18PM +0200, Sagi Grimberg wrote:
>>
>>>> Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
>>>
>>> Let's wait with this for a moment..
>>>
>>> Keith,
>>> Is it possible that you guys take this for a test run?
>>> This is the area that exposed a few issues in the past
>>> working against your target device...
>>>
>>> Ran, maybe you can run it against your target as well?
>>>
>>> Would love to get your tested-by/acked-by tags.
>>
>> I think we can move forward with this patch. If this turns
>> out to cause any issues we can address those as they come...
> 
> Hmm, it looks like this triggers the below warning on a disconnect test.
> 

If this is not triggered by blktests, then maybe it is worth adding
a testcase for that scenario.

-ck



* Re: [PATCH v4] nvme-tcp: send H2CData PDUs based on MAXH2CDATA
From: Keith Busch @ 2022-03-04 21:11 UTC
  To: Sagi Grimberg; +Cc: Varun Prakash, hch, Ran.Anner, linux-nvme

Correction from the testing side: the below error was introduced
somewhere between the most recent 5.15 stable and the current
5.17-rc6. It has nothing to do with this patch. I've requested a
bisect to identify the actual commit, and will report back any
findings.

> [Tue Mar  1 13:13:19 2022] nvme nvme0: Removing ctrl: NQN "nqn.2015-09.com.wdc:nvme.1"
> [Tue Mar  1 13:13:19 2022] ------------[ cut here ]------------
> [Tue Mar  1 13:13:19 2022] WARNING: CPU: 5 PID: 41414 at kernel/rcu/srcutree.c:373 cleanup_srcu_struct+0xe8/0xf0
> [...]


