All of lore.kernel.org
 help / color / mirror / Atom feed
From: Sagi Grimberg <sagi@grimberg.me>
To: elad.grupi@dell.com, linux-nvme@lists.infradead.org
Subject: Re: [PATCH] nvme-tcp: proper handling of tcp socket closing flows
Date: Thu, 28 Jan 2021 14:03:15 -0800	[thread overview]
Message-ID: <2c2a3aa6-ca43-e9b0-7928-28c6962ea1bc@grimberg.me> (raw)
In-Reply-To: <20210128152758.114112-1-elad.grupi@dell.com>


> ---
>   drivers/nvme/target/tcp.c | 24 ++++++++++++++++++++----
>   1 file changed, 20 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
> index d535080b781f..dac737bac874 100644
> --- a/drivers/nvme/target/tcp.c
> +++ b/drivers/nvme/target/tcp.c
> @@ -647,7 +647,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
>   	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
>   	int ret = 0;
>   
> -	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
> +	if (!cmd) {
>   		cmd = nvmet_tcp_fetch_cmd(queue);
>   		if (unlikely(!cmd))
>   			return 0;
> @@ -1453,9 +1453,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
>   	sock->sk->sk_state_change = nvmet_tcp_state_change;
>   	queue->write_space = sock->sk->sk_write_space;
>   	sock->sk->sk_write_space = nvmet_tcp_write_space;
> +
> +	switch (sk->sk_state) {
> +	case TCP_FIN_WAIT1:
> +	case TCP_CLOSE_WAIT:
> +	case TCP_CLOSE:
> +		/* FALLTHRU */
> +		sock->sk->sk_data_ready =  queue->data_ready;
> +		sock->sk->sk_state_change = queue->state_change;
> +		sock->sk->sk_write_space = queue->write_space;
> +		sk->sk_user_data = NULL;
> +		queue->state = NVMET_TCP_Q_DISCONNECTING;
> +		ret = -ENOTCONN;
> +		break;
> +	default:
> +		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
> +		ret = 0;
> +	}
> +
>   	write_unlock_bh(&sock->sk->sk_callback_lock);
>   
> -	return 0;
> +	return ret;
>   }
>   
>   static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
> @@ -1506,8 +1524,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
>   	if (ret)
>   		goto out_destroy_sq;
>   
> -	queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
> -
>   	return 0;
>   out_destroy_sq:
>   	mutex_lock(&nvmet_tcp_queue_mutex);
> 


What about this instead?
--
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index c41902f7ce39..6388d18ca7c2 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1494,16 +1494,28 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
                 ip_sock_set_tos(sock->sk, inet->rcv_tos);

         write_lock_bh(&sock->sk->sk_callback_lock);
-       sock->sk->sk_user_data = queue;
-       queue->data_ready = sock->sk->sk_data_ready;
-       sock->sk->sk_data_ready = nvmet_tcp_data_ready;
-       queue->state_change = sock->sk->sk_state_change;
-       sock->sk->sk_state_change = nvmet_tcp_state_change;
-       queue->write_space = sock->sk->sk_write_space;
-       sock->sk->sk_write_space = nvmet_tcp_write_space;
+       switch (sk->sk_state) {
+       case TCP_FIN_WAIT1:
+       case TCP_CLOSE_WAIT:
+       case TCP_CLOSE:
+               /*
+                * If the socket is already closing, don't even start
+                * consuming it
+                */
+               ret = -ENOTCONN;
+               break;
+       default:
+               sock->sk->sk_user_data = queue;
+               queue->data_ready = sock->sk->sk_data_ready;
+               sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+               queue->state_change = sock->sk->sk_state_change;
+               sock->sk->sk_state_change = nvmet_tcp_state_change;
+               queue->write_space = sock->sk->sk_write_space;
+               sock->sk->sk_write_space = nvmet_tcp_write_space;
+       }
         write_unlock_bh(&sock->sk->sk_callback_lock);

-       return 0;
+       return ret;
  }
--

_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

  reply	other threads:[~2021-01-28 22:03 UTC|newest]

Thread overview: 14+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-01-28 15:27 [PATCH] nvme-tcp: proper handling of tcp socket closing flows elad.grupi
2021-01-28 22:03 ` Sagi Grimberg [this message]
2021-01-28 23:07   ` Grupi, Elad
2021-01-28 23:33     ` Sagi Grimberg
2021-01-28 23:43       ` Grupi, Elad
2021-01-28 23:54         ` Sagi Grimberg
2021-01-29  0:01           ` Grupi, Elad
2021-01-29  0:07             ` Sagi Grimberg
2021-01-31 15:47       ` Grupi, Elad
2021-01-29 22:10 ` Sagi Grimberg
2021-01-31 15:47   ` Grupi, Elad
  -- strict thread matches above, loose matches on Subject: below --
2021-01-26 15:37 elad.grupi
2021-01-28  7:54 ` Sagi Grimberg
2021-01-28 15:27   ` Grupi, Elad

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=2c2a3aa6-ca43-e9b0-7928-28c6962ea1bc@grimberg.me \
    --to=sagi@grimberg.me \
    --cc=elad.grupi@dell.com \
    --cc=linux-nvme@lists.infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.