Linux-NVME Archive on lore.kernel.org
From: Himanshu Madhani <himanshu.madhani@oracle.com>
To: James Smart <jsmart2021@gmail.com>, linux-nvme@lists.infradead.org
Subject: Re: [PATCH] nvme-fcloop: Fix deallocation of working context
Date: Thu, 26 Mar 2020 14:00:47 -0500
Message-ID: <be54696d-bda7-097a-a69d-119d86309529@oracle.com> (raw)
In-Reply-To: <20200318214112.18317-1-jsmart2021@gmail.com>

On 3/18/2020 4:41 PM, James Smart wrote:
> There's been a longstanding bug where LS completions freed the LS
> op, particularly the disconnect LS, while executing on a work
> context that lives in the memory being freed. Not a good thing to do.
> 
> Rework LS handling to make callbacks in the rport context
> rather than the ls_request context.
> 
> Signed-off-by: James Smart <jsmart2021@gmail.com>
> Reviewed-by: Hannes Reinecke <hare@suse.de>
> ---
>   drivers/nvme/target/fcloop.c | 76 ++++++++++++++++++++++++++++++--------------
>   1 file changed, 52 insertions(+), 24 deletions(-)
> 
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> index 1c50af6219f3..9861fcea39f6 100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -198,10 +198,13 @@ struct fcloop_lport_priv {
>   };
>   
>   struct fcloop_rport {
> -	struct nvme_fc_remote_port *remoteport;
> -	struct nvmet_fc_target_port *targetport;
> -	struct fcloop_nport *nport;
> -	struct fcloop_lport *lport;
> +	struct nvme_fc_remote_port	*remoteport;
> +	struct nvmet_fc_target_port	*targetport;
> +	struct fcloop_nport		*nport;
> +	struct fcloop_lport		*lport;
> +	spinlock_t			lock;
> +	struct list_head		ls_list;
> +	struct work_struct		ls_work;
>   };
>   
>   struct fcloop_tport {
> @@ -224,11 +227,10 @@ struct fcloop_nport {
>   };
>   
>   struct fcloop_lsreq {
> -	struct fcloop_tport		*tport;
>   	struct nvmefc_ls_req		*lsreq;
> -	struct work_struct		work;
>   	struct nvmefc_tgt_ls_req	tgt_ls_req;
>   	int				status;
> +	struct list_head		ls_list; /* fcloop_rport->ls_list */
>   };
>   
>   struct fcloop_rscn {
> @@ -292,21 +294,32 @@ fcloop_delete_queue(struct nvme_fc_local_port *localport,
>   {
>   }
>   
> -
> -/*
> - * Transmit of LS RSP done (e.g. buffers all set). call back up
> - * initiator "done" flows.
> - */
>   static void
> -fcloop_tgt_lsrqst_done_work(struct work_struct *work)
> +fcloop_rport_lsrqst_work(struct work_struct *work)
>   {
> -	struct fcloop_lsreq *tls_req =
> -		container_of(work, struct fcloop_lsreq, work);
> -	struct fcloop_tport *tport = tls_req->tport;
> -	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
> +	struct fcloop_rport *rport =
> +		container_of(work, struct fcloop_rport, ls_work);
> +	struct fcloop_lsreq *tls_req;
>   
> -	if (!tport || tport->remoteport)
> -		lsreq->done(lsreq, tls_req->status);
> +	spin_lock(&rport->lock);
> +	for (;;) {
> +		tls_req = list_first_entry_or_null(&rport->ls_list,
> +				struct fcloop_lsreq, ls_list);
> +		if (!tls_req)
> +			break;
> +
> +		list_del(&tls_req->ls_list);
> +		spin_unlock(&rport->lock);
> +
> +		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
> +		/*
> +		 * callee may free memory containing tls_req.
> +		 * do not reference lsreq after this.
> +		 */
> +
> +		spin_lock(&rport->lock);
> +	}
> +	spin_unlock(&rport->lock);
>   }
>   
>   static int
> @@ -319,17 +332,18 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
>   	int ret = 0;
>   
>   	tls_req->lsreq = lsreq;
> -	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
> +	INIT_LIST_HEAD(&tls_req->ls_list);
>   
>   	if (!rport->targetport) {
>   		tls_req->status = -ECONNREFUSED;
> -		tls_req->tport = NULL;
> -		schedule_work(&tls_req->work);
> +		spin_lock(&rport->lock);
> +		list_add_tail(&tls_req->ls_list, &rport->ls_list);
> +		spin_unlock(&rport->lock);
> +		schedule_work(&rport->ls_work);
>   		return ret;
>   	}
>   
>   	tls_req->status = 0;
> -	tls_req->tport = rport->targetport->private;
>   	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
>   				 lsreq->rqstaddr, lsreq->rqstlen);
>   
> @@ -337,18 +351,28 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
>   }
>   
>   static int
> -fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
> +fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
>   			struct nvmefc_tgt_ls_req *tgt_lsreq)
>   {
>   	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
>   	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
> +	struct fcloop_tport *tport = targetport->private;
> +	struct nvme_fc_remote_port *remoteport = tport->remoteport;
> +	struct fcloop_rport *rport;
>   
>   	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
>   		((lsreq->rsplen < tgt_lsreq->rsplen) ?
>   				lsreq->rsplen : tgt_lsreq->rsplen));
> +
>   	tgt_lsreq->done(tgt_lsreq);
>   
> -	schedule_work(&tls_req->work);
> +	if (remoteport) {
> +		rport = remoteport->private;
> +		spin_lock(&rport->lock);
> +		list_add_tail(&tls_req->ls_list, &rport->ls_list);
> +		spin_unlock(&rport->lock);
> +		schedule_work(&rport->ls_work);
> +	}
>   
>   	return 0;
>   }
> @@ -834,6 +858,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
>   {
>   	struct fcloop_rport *rport = remoteport->private;
>   
> +	flush_work(&rport->ls_work);
>   	fcloop_nport_put(rport->nport);
>   }
>   
> @@ -1136,6 +1161,9 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
>   	rport->nport = nport;
>   	rport->lport = nport->lport;
>   	nport->rport = rport;
> +	spin_lock_init(&rport->lock);
> +	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
> +	INIT_LIST_HEAD(&rport->ls_list);
>   
>   	return count;
>   }
> 
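
For readers skimming the archive, here is a minimal userspace sketch of the
pattern the patch adopts (hypothetical names, pthreads standing in for the
kernel workqueue API, not the fcloop code itself): completions are queued on
a list owned by the long-lived rport object and drained by a single worker
bound to that rport, so the done callback may free the per-request memory
without destroying the context it runs on; teardown waits for the worker,
analogous to the flush_work() added in fcloop_remoteport_delete().

/*
 * Minimal userspace sketch (hypothetical, pthreads instead of the kernel
 * workqueue): completions are queued on the long-lived "rport" and drained
 * by a worker owned by that rport, so req->done() may safely free the request.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ls_req {
	struct ls_req *next;                        /* pending-completion link */
	int status;
	void (*done)(struct ls_req *req, int status);
};

struct rport {
	pthread_mutex_t lock;
	pthread_cond_t kick;
	struct ls_req *head, *tail;                 /* pending completions */
	bool stopping;
};

/* Analogue of list_add_tail() + schedule_work() on the rport. */
static void rport_queue_done(struct rport *rp, struct ls_req *req, int status)
{
	req->status = status;
	req->next = NULL;
	pthread_mutex_lock(&rp->lock);
	if (rp->tail)
		rp->tail->next = req;
	else
		rp->head = req;
	rp->tail = req;
	pthread_cond_signal(&rp->kick);
	pthread_mutex_unlock(&rp->lock);
}

/* Analogue of fcloop_rport_lsrqst_work(): runs in rport-owned context. */
static void *rport_worker(void *arg)
{
	struct rport *rp = arg;

	pthread_mutex_lock(&rp->lock);
	for (;;) {
		struct ls_req *req = rp->head;

		if (req) {
			rp->head = req->next;
			if (!rp->head)
				rp->tail = NULL;
			pthread_mutex_unlock(&rp->lock);
			req->done(req, req->status); /* callee may free req */
			pthread_mutex_lock(&rp->lock);
			continue;
		}
		if (rp->stopping)
			break;
		pthread_cond_wait(&rp->kick, &rp->lock);
	}
	pthread_mutex_unlock(&rp->lock);
	return NULL;
}

static void done_and_free(struct ls_req *req, int status)
{
	printf("LS done, status %d\n", status);
	free(req);                  /* safe: the worker context lives in rport */
}

int main(void)
{
	struct rport rp = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.kick = PTHREAD_COND_INITIALIZER,
	};
	pthread_t worker;
	struct ls_req *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	req->done = done_and_free;
	pthread_create(&worker, NULL, rport_worker, &rp);
	rport_queue_done(&rp, req, 0);

	/* Analogue of the flush_work() added to fcloop_remoteport_delete(). */
	pthread_mutex_lock(&rp.lock);
	rp.stopping = true;
	pthread_cond_signal(&rp.kick);
	pthread_mutex_unlock(&rp.lock);
	pthread_join(worker, NULL);
	return 0;
}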

Looks Good.

Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>

_______________________________________________
linux-nvme mailing list
linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

Thread overview: 3+ messages
2020-03-18 21:41 James Smart
2020-03-26 19:00 ` Himanshu Madhani [this message]
2020-03-31 14:17 ` Christoph Hellwig
