From: Ming Lei <ming.lei@redhat.com>
To: ZiyangZhang <ZiyangZhang@linux.alibaba.com>
Cc: axboe@kernel.dk, xiaoguang.wang@linux.alibaba.com,
	linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
	joseph.qi@linux.alibaba.com
Subject: Re: [PATCH V4 5/8] ublk_drv: consider recovery feature in aborting mechanism
Date: Thu, 22 Sep 2022 08:18:46 +0800
Message-ID: <Yyup5vt32fULKIJu@T590>
In-Reply-To: <20220921095849.84988-6-ZiyangZhang@linux.alibaba.com>

On Wed, Sep 21, 2022 at 05:58:46PM +0800, ZiyangZhang wrote:
> With the USER_RECOVERY feature enabled, the monitor_work schedules
> quiesce_work after finding a dying ubq_daemon. The monitor_work
> should also abort all rqs that were issued to userspace before the
> ubq_daemon died. The quiesce_work's job is to:
> (1) quiesce the request queue.
> (2) check if there is any INFLIGHT rq. If so, retry until all these
>     rqs are requeued and become IDLE. These rqs should be requeued by
>     ublk_queue_rq(), task work, the io_uring fallback wq or monitor_work.
> (3) complete all ioucmds by calling io_uring_cmd_done(). We are safe to
>     do so because no ioucmd can be referenced now.
> (4) set ub's state to UBLK_S_DEV_QUIESCED, which means we are ready for
>     recovery. This state is exposed to userspace by GET_DEV_INFO.
> 
> The driver can always handle STOP_DEV and clean up everything, no matter
> whether ub's state is LIVE or QUIESCED. After ub's state becomes
> UBLK_S_DEV_QUIESCED, the user can recover with a new process.
> 
> Note: we do not change the default behavior with the recovery feature
> disabled. monitor_work still schedules stop_work and aborts inflight
> rqs, and finally the ublk_device is released.
> 
> Signed-off-by: ZiyangZhang <ZiyangZhang@linux.alibaba.com>

This version is close to ready; just some debug logging needs to
be removed, see the inline comments. Also, I'd suggest you learn to
use bpftrace a bit, then you basically needn't rely on kernel
logging.

Once this logging is removed, you will see how simple the patch becomes
compared with the previous version.
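
For instance, something like this one-liner (untested, and assuming
__ublk_fail_req() isn't inlined and that bpftrace can pull struct
request from the kernel's BTF) shows the same abort info as the
pr_devel() added below, without patching the driver:

    bpftrace -e 'kprobe:__ublk_fail_req { printf("abort rq: tag %d\n", ((struct request *)arg2)->tag); }'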

> ---
>  drivers/block/ublk_drv.c | 137 +++++++++++++++++++++++++++++++++++----
>  1 file changed, 125 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
> index b940e490ebab..9610afe11463 100644
> --- a/drivers/block/ublk_drv.c
> +++ b/drivers/block/ublk_drv.c
> @@ -120,7 +120,7 @@ struct ublk_queue {
>  
>  	unsigned long io_addr;	/* mapped vm address */
>  	unsigned int max_io_sz;
> -	bool abort_work_pending;
> +	bool force_abort;
>  	unsigned short nr_io_ready;	/* how many ios setup */
>  	struct ublk_device *dev;
>  	struct ublk_io ios[0];
> @@ -162,6 +162,7 @@ struct ublk_device {
>  	 * monitor each queue's daemon periodically
>  	 */
>  	struct delayed_work	monitor_work;
> +	struct work_struct	quiesce_work;
>  	struct work_struct	stop_work;
>  };
>  
> @@ -628,11 +629,17 @@ static void ublk_complete_rq(struct request *req)
>   * Also aborting may not be started yet, keep in mind that one failed
>   * request may be issued by block layer again.
>   */
> -static void __ublk_fail_req(struct ublk_io *io, struct request *req)
> +static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
> +		struct request *req)
>  {
>  	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
>  
>  	if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
> +		pr_devel("%s: abort rq: qid %d tag %d io_flags %x\n",
> +				__func__,
> +				req->mq_hctx->queue_num,
> +				req->tag,
> +				io->flags);

No need to add the above log.

>  		io->flags |= UBLK_IO_FLAG_ABORTED;
>  		blk_mq_end_request(req, BLK_STS_IOERR);
>  	}
> @@ -676,10 +683,6 @@ static inline void __ublk_rq_task_work(struct request *req)
>  	struct ublk_io *io = &ubq->ios[tag];
>  	unsigned int mapped_bytes;
>  
> -	pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
> -			__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
> -			ublk_get_iod(ubq, req->tag)->addr);
> -
>  	/*
>  	 * Task is exiting if either:
>  	 *
> @@ -746,6 +749,9 @@ static inline void __ublk_rq_task_work(struct request *req)
>  			mapped_bytes >> 9;
>  	}
>  
> +	pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
> +			__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
> +			ublk_get_iod(ubq, req->tag)->addr);
>  	ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
>  }
>  
> @@ -790,6 +796,21 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
>  	res = ublk_setup_iod(ubq, rq);
>  	if (unlikely(res != BLK_STS_OK))
>  		return BLK_STS_IOERR;
> +	/* With recovery feature enabled, force_abort is set in
> +	 * ublk_stop_dev() before calling del_gendisk(). We have to
> +	 * abort all requeued and new rqs here to let del_gendisk()
> +	 * move on. Besides, we cannot call io_uring_cmd_complete_in_task()
> +	 * here, to avoid UAF on the io_uring ctx.
> +	 *
> +	 * Note: force_abort is guaranteed to be seen because it is set
> +	 * before the request queue is unquiesced.
> +	 */
> +	if (unlikely(ubq->force_abort)) {
> +		pr_devel("%s: abort rq: qid %d tag %d io_flags %x\n",
> +				__func__, ubq->q_id, rq->tag,
> +				ubq->ios[rq->tag].flags);

same with above.

> +		return BLK_STS_IOERR;
> +	}
>  
>  	blk_mq_start_request(bd->rq);
>  
> @@ -967,7 +988,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
>  			 */
>  			rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
>  			if (rq)
> -				__ublk_fail_req(io, rq);
> +				__ublk_fail_req(ubq, io, rq);
>  		}
>  	}
>  	ublk_put_device(ub);
> @@ -983,7 +1004,10 @@ static void ublk_daemon_monitor_work(struct work_struct *work)
>  		struct ublk_queue *ubq = ublk_get_queue(ub, i);
>  
>  		if (ubq_daemon_is_dying(ubq)) {
> -			schedule_work(&ub->stop_work);
> +			if (ublk_queue_can_use_recovery(ubq))
> +				schedule_work(&ub->quiesce_work);
> +			else
> +				schedule_work(&ub->stop_work);
>  
>  			/* abort queue is for making forward progress */
>  			ublk_abort_queue(ub, ubq);
> @@ -991,12 +1015,13 @@ static void ublk_daemon_monitor_work(struct work_struct *work)
>  	}
>  
>  	/*
> -	 * We can't schedule monitor work after ublk_remove() is started.
> +	 * We can't schedule monitor work once ub's state is no longer UBLK_S_DEV_LIVE,
> +	 * i.e. after ublk_remove() or __ublk_quiesce_dev() is started.
>  	 *
>  	 * No need ub->mutex, monitor work are canceled after state is marked
> -	 * as DEAD, so DEAD state is observed reliably.
> +	 * as not LIVE, so new state is observed reliably.
>  	 */
> -	if (ub->dev_info.state != UBLK_S_DEV_DEAD)
> +	if (ub->dev_info.state == UBLK_S_DEV_LIVE)
>  		schedule_delayed_work(&ub->monitor_work,
>  				UBLK_DAEMON_MONITOR_PERIOD);
>  }
> @@ -1050,12 +1075,97 @@ static void ublk_cancel_dev(struct ublk_device *ub)
>  		ublk_cancel_queue(ublk_get_queue(ub, i));
>  }
>  
> -static void ublk_stop_dev(struct ublk_device *ub)
> +static bool ublk_check_inflight_rq(struct request *rq, void *data)
> +{
> +	bool *idle = data;
> +
> +	if (blk_mq_request_started(rq)) {
> +		pr_devel("%s: rq qid %d tag %d is not IDLE.\n",
> +				__func__, rq->mq_hctx->queue_num,
> +				rq->tag);

Please remove the above log, otherwise it may overflow the printk buffer.
Also, you can observe pending request info from blk-mq debugfs.
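
For example (ublkb0 below is just a placeholder for whatever your
device ends up being named), the started requests per hctx are
visible here:

    grep . /sys/kernel/debug/block/ublkb0/hctx*/busy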

> +		*idle = false;
> +		return false;
> +	}
> +	return true;
> +}
> +
> +static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
> +{
> +	bool idle;
> +
> +	WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
> +	while (true) {
> +		idle = true;
> +		blk_mq_tagset_busy_iter(&ub->tag_set,
> +				ublk_check_inflight_rq, &idle);
> +		if (idle)
> +			break;
> +		pr_devel("%s: not all tags are idle, ub: dev_id %d\n",
> +				__func__, ub->dev_info.dev_id);

The above logging isn't useful; it is easy to tell that the wait isn't
done by checking the stack trace or the blk-mq debugfs entries.
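
For example (the kworker pid below is a placeholder for whichever
worker happens to be running the quiesce work):

    cat /proc/<kworker-pid>/stack
    echo t > /proc/sysrq-trigger    # dump all task stacks to dmesg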

> +		msleep(UBLK_REQUEUE_DELAY_MS);
> +	}
> +}
> +
> +static void __ublk_quiesce_dev(struct ublk_device *ub)
>  {
> +	pr_devel("%s: quiesce ub: dev_id %d state %s\n",
> +			__func__, ub->dev_info.dev_id,
> +			ub->dev_info.state == UBLK_S_DEV_LIVE ?
> +			"LIVE" : "QUIESCED");
> +	blk_mq_quiesce_queue(ub->ub_disk->queue);
> +	ublk_wait_tagset_rqs_idle(ub);
> +	pr_devel("%s: all tags are idle, ub: dev_id %d\n",
> +			__func__, ub->dev_info.dev_id);

The above logging can be removed too.

> +	ublk_cancel_dev(ub);
> +	ub->dev_info.state = UBLK_S_DEV_QUIESCED;
> +}
> +
> +static void ublk_quiesce_work_fn(struct work_struct *work)
> +{
> +	struct ublk_device *ub =
> +		container_of(work, struct ublk_device, quiesce_work);
> +
>  	mutex_lock(&ub->mutex);
>  	if (ub->dev_info.state != UBLK_S_DEV_LIVE)
>  		goto unlock;
> +	pr_devel("%s: start __ublk_quiesce_dev: dev_id %d\n",
> +			__func__, ub->dev_info.dev_id);

The above logging isn't needed, since you already add one
at the beginning of __ublk_quiesce_dev().


Thanks,
Ming

