All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] nvme-tcp: proper handling of tcp socket closing flows
@ 2021-01-26 15:37 elad.grupi
  2021-01-28  7:54 ` Sagi Grimberg
  0 siblings, 1 reply; 14+ messages in thread
From: elad.grupi @ 2021-01-26 15:37 UTC (permalink / raw)
  To: sagi, linux-nvme; +Cc: Elad Grupi

From: Elad Grupi <elad.grupi@dell.com>

Avoid calling nvmet_tcp_release_queue_work() if the TCP socket was
closed before the sk callbacks were set.

Prevent io_work from being re-enqueued while the TCP queue is closing,
to avoid a race with nvmet_tcp_release_queue_work().

Signed-off-by: Elad Grupi <elad.grupi@dell.com>
---
 drivers/nvme/target/tcp.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index d535080b781f..937f2a746d8b 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -647,7 +647,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
 	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
 	int ret = 0;
 
-	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
+	if (!cmd) {
 		cmd = nvmet_tcp_fetch_cmd(queue);
 		if (unlikely(!cmd))
 			return 0;
@@ -1196,7 +1196,7 @@ static void nvmet_tcp_io_work(struct work_struct *w)
 	/*
 	 * We exahusted our budget, requeue our selves
 	 */
-	if (pending)
+	if (pending && queue->state != NVMET_TCP_Q_DISCONNECTING)
 		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
 }
 
@@ -1453,9 +1453,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
 	sock->sk->sk_state_change = nvmet_tcp_state_change;
 	queue->write_space = sock->sk->sk_write_space;
 	sock->sk->sk_write_space = nvmet_tcp_write_space;
+
+	switch (sk->sk_state) {
+	case TCP_FIN_WAIT1:
+	case TCP_CLOSE_WAIT:
+	case TCP_CLOSE:
+		/* FALLTHRU */
+		sock->sk->sk_data_ready =  queue->data_ready;
+		sock->sk->sk_state_change = queue->state_change;
+		sock->sk->sk_write_space = queue->write_space;
+		sk->sk_user_data = NULL;
+		queue->state = NVMET_TCP_Q_DISCONNECTING;
+		ret = -ENOTCONN;
+		break;
+	default:
+		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+		ret = 0;
+	}
+
 	write_unlock_bh(&sock->sk->sk_callback_lock);
 
-	return 0;
+	return ret;
 }
 
 static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@@ -1506,8 +1524,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
 	if (ret)
 		goto out_destroy_sq;
 
-	queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
-
 	return 0;
 out_destroy_sq:
 	mutex_lock(&nvmet_tcp_queue_mutex);
-- 
2.16.5


_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply related	[flat|nested] 14+ messages in thread
* [PATCH] nvme-tcp: proper handling of tcp socket closing flows
@ 2021-01-28 15:27 elad.grupi
  2021-01-28 22:03 ` Sagi Grimberg
  2021-01-29 22:10 ` Sagi Grimberg
  0 siblings, 2 replies; 14+ messages in thread
From: elad.grupi @ 2021-01-28 15:27 UTC (permalink / raw)
  To: sagi, linux-nvme; +Cc: Elad Grupi

From: Elad Grupi <elad.grupi@dell.com>

Avoid calling nvmet_tcp_release_queue_work() if the TCP socket was
closed before the sk callbacks were set.

Signed-off-by: Elad Grupi <elad.grupi@dell.com>
---
 drivers/nvme/target/tcp.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index d535080b781f..dac737bac874 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -647,7 +647,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
 	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
 	int ret = 0;
 
-	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
+	if (!cmd) {
 		cmd = nvmet_tcp_fetch_cmd(queue);
 		if (unlikely(!cmd))
 			return 0;
@@ -1453,9 +1453,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
 	sock->sk->sk_state_change = nvmet_tcp_state_change;
 	queue->write_space = sock->sk->sk_write_space;
 	sock->sk->sk_write_space = nvmet_tcp_write_space;
+
+	switch (sk->sk_state) {
+	case TCP_FIN_WAIT1:
+	case TCP_CLOSE_WAIT:
+	case TCP_CLOSE:
+		/* FALLTHRU */
+		sock->sk->sk_data_ready =  queue->data_ready;
+		sock->sk->sk_state_change = queue->state_change;
+		sock->sk->sk_write_space = queue->write_space;
+		sk->sk_user_data = NULL;
+		queue->state = NVMET_TCP_Q_DISCONNECTING;
+		ret = -ENOTCONN;
+		break;
+	default:
+		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+		ret = 0;
+	}
+
 	write_unlock_bh(&sock->sk->sk_callback_lock);
 
-	return 0;
+	return ret;
 }
 
 static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@@ -1506,8 +1524,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
 	if (ret)
 		goto out_destroy_sq;
 
-	queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
-
 	return 0;
 out_destroy_sq:
 	mutex_lock(&nvmet_tcp_queue_mutex);
-- 
2.16.5


_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

^ permalink raw reply related	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2021-01-31 15:47 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-01-26 15:37 [PATCH] nvme-tcp: proper handling of tcp socket closing flows elad.grupi
2021-01-28  7:54 ` Sagi Grimberg
2021-01-28 15:27   ` Grupi, Elad
2021-01-28 15:27 elad.grupi
2021-01-28 22:03 ` Sagi Grimberg
2021-01-28 23:07   ` Grupi, Elad
2021-01-28 23:33     ` Sagi Grimberg
2021-01-28 23:43       ` Grupi, Elad
2021-01-28 23:54         ` Sagi Grimberg
2021-01-29  0:01           ` Grupi, Elad
2021-01-29  0:07             ` Sagi Grimberg
2021-01-31 15:47       ` Grupi, Elad
2021-01-29 22:10 ` Sagi Grimberg
2021-01-31 15:47   ` Grupi, Elad

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.