All of lore.kernel.org
 help / color / mirror / Atom feed
From: Oleksandr Natalenko <oleksandr@natalenko.name>
To: Ming Lei <ming.lei@redhat.com>
Cc: linux-kernel@vger.kernel.org, Jens Axboe <axboe@fb.com>,
	Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
	linux-nvme@lists.infradead.org,
	David Jeffery <djeffery@redhat.com>,
	Laurence Oberman <loberman@redhat.com>,
	Paolo Valente <paolo.valente@linaro.org>, Jan Kara <jack@suse.cz>,
	Sasha Levin <sashal@kernel.org>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Keith Busch <kbusch@kernel.org>
Subject: Re: New warning in nvme_setup_discard
Date: Wed, 28 Jul 2021 15:44:06 +0200	[thread overview]
Message-ID: <3180854.nXyytZ0Y3r@natalenko.name> (raw)
In-Reply-To: <YQAtL5i0pjlnBpHV@T590>

Hello.

On úterý 27. července 2021 17:58:39 CEST Ming Lei wrote:
> BTW, can you test the following patch? which is another approach on the same
> issue with other benefits.
> 
> From c853e7ed05a75f631da5b7952b9a989983437819 Mon Sep 17 00:00:00 2001
> From: Ming Lei <ming.lei@redhat.com>
> Date: Mon, 7 Jun 2021 16:03:51 +0800
> Subject: [PATCH 2/2] block: support bio merge for multi-range discard
> 
> So far multi-range discard treats each bio as one segment (range) of a
> single discard request. This approach becomes inefficient if lots of
> small-sized discard bios are submitted; one example is raid456.
> 
> Support bio merge for multi-range discard for improving lots of small
> sized discard bios.
> 
> Turns out it is easy to support it:
> 
> 1) always try to merge bio first
> 
> 2) run into multi-range discard only if bio merge can't be done
> 
> 3) add rq_for_each_discard_range() for retrieving each range(segment)
> of discard request
> 
> Reported-by: Wang Shanker <shankerwangmiao@gmail.com>
> Signed-off-by: Ming Lei <ming.lei@redhat.com>
> ---
>  block/blk-merge.c          | 12 ++++-----
>  drivers/block/virtio_blk.c |  9 ++++---
>  drivers/nvme/host/core.c   |  8 +++---
>  include/linux/blkdev.h     | 51 ++++++++++++++++++++++++++++++++++++++
>  4 files changed, 66 insertions(+), 14 deletions(-)
> 
> diff --git a/block/blk-merge.c b/block/blk-merge.c
> index bcdff1879c34..65210e9a8efa 100644
> --- a/block/blk-merge.c
> +++ b/block/blk-merge.c
> @@ -724,10 +724,10 @@ static inline bool blk_discard_mergable(struct request
> *req) static enum elv_merge blk_try_req_merge(struct request *req,
>  					struct request *next)
>  {
> -	if (blk_discard_mergable(req))
> -		return ELEVATOR_DISCARD_MERGE;
> -	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
> +	if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
>  		return ELEVATOR_BACK_MERGE;
> +	else if (blk_discard_mergable(req))
> +		return ELEVATOR_DISCARD_MERGE;
> 
>  	return ELEVATOR_NO_MERGE;
>  }
> @@ -908,12 +908,12 @@ bool blk_rq_merge_ok(struct request *rq, struct bio
> *bio)
> 
>  enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
>  {
> -	if (blk_discard_mergable(rq))
> -		return ELEVATOR_DISCARD_MERGE;
> -	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
> +	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
>  		return ELEVATOR_BACK_MERGE;
>  	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
>  		return ELEVATOR_FRONT_MERGE;
> +	else if (blk_discard_mergable(rq))
> +		return ELEVATOR_DISCARD_MERGE;
>  	return ELEVATOR_NO_MERGE;
>  }
> 
> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
> index b9fa3ef5b57c..970cb0d8acaa 100644
> --- a/drivers/block/virtio_blk.c
> +++ b/drivers/block/virtio_blk.c
> @@ -116,7 +116,6 @@ static int virtblk_setup_discard_write_zeroes(struct
> request *req, bool unmap) unsigned short segments =
> blk_rq_nr_discard_segments(req);
>  	unsigned short n = 0;
>  	struct virtio_blk_discard_write_zeroes *range;
> -	struct bio *bio;
>  	u32 flags = 0;
> 
>  	if (unmap)
> @@ -138,9 +137,11 @@ static int virtblk_setup_discard_write_zeroes(struct
> request *req, bool unmap) range[0].sector = cpu_to_le64(blk_rq_pos(req));
>  		n = 1;
>  	} else {
> -		__rq_for_each_bio(bio, req) {
> -			u64 sector = bio->bi_iter.bi_sector;
> -			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
> +		struct req_discard_range r;
> +
> +		rq_for_each_discard_range(r, req) {
> +			u64 sector = r.sector;
> +			u32 num_sectors = r.size >> SECTOR_SHIFT;
> 
>  			range[n].flags = cpu_to_le32(flags);
>  			range[n].num_sectors = cpu_to_le32(num_sectors);
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index 24bcae88587a..4b0a39360ce9 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -813,7 +813,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns
> *ns, struct request *req, {
>  	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
>  	struct nvme_dsm_range *range;
> -	struct bio *bio;
> +	struct req_discard_range r;
> 
>  	/*
>  	 * Some devices do not consider the DSM 'Number of Ranges' field when
> @@ -835,9 +835,9 @@ static blk_status_t nvme_setup_discard(struct nvme_ns
> *ns, struct request *req, range = page_address(ns->ctrl->discard_page);
>  	}
> 
> -	__rq_for_each_bio(bio, req) {
> -		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
> -		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
> +	rq_for_each_discard_range(r, req) {
> +		u64 slba = nvme_sect_to_lba(ns, r.sector);
> +		u32 nlb = r.size >> ns->lba_shift;
> 
>  		if (n < segments) {
>  			range[n].cattr = cpu_to_le32(0);
> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
> index d66d0da72529..bd9d22269a7b 100644
> --- a/include/linux/blkdev.h
> +++ b/include/linux/blkdev.h
> @@ -1007,6 +1007,57 @@ static inline unsigned int blk_rq_stats_sectors(const
> struct request *rq) return rq->stats_sectors;
>  }
> 
> +struct req_discard_range {
> +	sector_t	sector;
> +	unsigned int	size;
> +
> +	/*
> +	 * internal field: drivers don't use it; it always points to the
> +	 * next bio to be processed
> +	 */
> +	struct bio *__bio;
> +};
> +
> +static inline void req_init_discard_range_iter(const struct request *rq,
> +		struct req_discard_range *range)
> +{
> +	range->__bio = rq->bio;
> +}
> +
> +/* return true if @range stores one valid discard range */
> +static inline bool req_get_discard_range(struct req_discard_range *range)
> +{
> +	struct bio *bio;
> +
> +	if (!range->__bio)
> +		return false;
> +
> +	bio = range->__bio;
> +	range->sector = bio->bi_iter.bi_sector;
> +	range->size = bio->bi_iter.bi_size;
> +	range->__bio = bio->bi_next;
> +
> +	while (range->__bio) {
> +		struct bio *bio = range->__bio;
> +
> +		if (range->sector + (range->size >> SECTOR_SHIFT) !=
> +				bio->bi_iter.bi_sector)
> +			break;
> +
> +		/*
> +		 * ->size won't overflow because req->__data_len is defined
> +		 *  as 'unsigned int'
> +		 */
> +		range->size += bio->bi_iter.bi_size;
> +		range->__bio = bio->bi_next;
> +	}
> +	return true;
> +}
> +
> +#define rq_for_each_discard_range(range, rq) \
> +	for (req_init_discard_range_iter((rq), &range); \
> +			req_get_discard_range(&range);)
> +
>  #ifdef CONFIG_BLK_DEV_ZONED
> 
>  /* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */

Do I have to revert the previous one and apply this one? If so, with this one the issue is triggered pretty quickly:

```
kernel: ------------[ cut here ]------------
kernel: WARNING: CPU: 20 PID: 490 at drivers/nvme/host/core.c:850 nvme_setup_discard+0x1b9/0x220
…
kernel: CPU: 20 PID: 490 Comm: md0_raid10 Not tainted 5.13.0-pf4 #1
kernel: Hardware name: ASUS System Product Name/Pro WS X570-ACE, BIOS 3601 05/26/2021
kernel: RIP: 0010:nvme_setup_discard+0x1b9/0x220
kernel: Code: 38 4c 8b 88 40 0b 00 00 4c 2b 0d f2 06 d8 00 49 c1 f9 06 49 c1 e1 0c 4c 03 0d f3 06 d8 00 4d 89 c8 48 85 d2 0f 85 9f fe ff ff <0f> 0b b8 00 00 00 80 4c 01 c8 72 52 48 c7 c2 00 00 00 80 48 2b 15
kernel: RSP: 0018:ffffa3a34152ba10 EFLAGS: 00010202
kernel: RAX: ffff8b78d80db0c0 RBX: 000000000000000f RCX: 0000000000000400
kernel: RDX: 0000000000000000 RSI: 00000000241b5c00 RDI: 000000000000000d
kernel: RBP: ffff8b78cbd70380 R08: ffff8b78d80db000 R09: ffff8b78d80db000
kernel: R10: 00000000241b5c00 R11: 0000000000000000 R12: ffff8b78c5a4b800
kernel: R13: ffff8b78cbd704c8 R14: ffff8b78c5bd8000 R15: ffff8b78cabbf000
kernel: FS:  0000000000000000(0000) GS:ffff8b7fcef00000(0000) knlGS:0000000000000000
kernel: CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
kernel: CR2: 00007faaaf746020 CR3: 00000001e0342000 CR4: 0000000000350ee0
kernel: Call Trace:
kernel:  nvme_setup_cmd+0x2d0/0x670
kernel:  nvme_queue_rq+0x79/0xc90
kernel:  ? __sbitmap_get_word+0x30/0x80
kernel:  ? sbitmap_get+0x85/0x180
kernel:  blk_mq_dispatch_rq_list+0x15c/0x810
kernel:  __blk_mq_do_dispatch_sched+0xca/0x320
kernel:  ? ktime_get+0x38/0xa0
kernel:  __blk_mq_sched_dispatch_requests+0x14d/0x190
kernel:  blk_mq_sched_dispatch_requests+0x2f/0x60
kernel:  __blk_mq_run_hw_queue+0x30/0xa0
kernel:  __blk_mq_delay_run_hw_queue+0x142/0x170
kernel:  blk_mq_sched_insert_requests+0x6d/0xf0
kernel:  blk_mq_flush_plug_list+0x111/0x1c0
kernel:  blk_finish_plug+0x21/0x30
kernel:  raid10d+0x7c8/0x1960 [raid10]
kernel:  ? psi_task_switch+0xf2/0x330
kernel:  ? __switch_to_asm+0x42/0x70
kernel:  ? finish_task_switch.isra.0+0xaa/0x290
kernel:  ? md_thread+0xc3/0x190 [md_mod]
kernel:  md_thread+0xc3/0x190 [md_mod]
kernel:  ? finish_wait+0x80/0x80
kernel:  ? md_rdev_init+0xb0/0xb0 [md_mod]
kernel:  kthread+0x1b3/0x1e0
kernel:  ? __kthread_init_worker+0x50/0x50
kernel:  ret_from_fork+0x22/0x30
kernel: ---[ end trace dc148fcea235e799 ]---
kernel: blk_update_request: I/O error, dev nvme0n1, sector 605615104 op 0x3:(DISCARD) flags 0x0 phys_seg 15 prio class 0
kernel: blk_update_request: I/O error, dev nvme1n1, sector 118159360 op 0x3:(DISCARD) flags 0x0 phys_seg 15 prio class 0
kernel: blk_update_request: I/O error, dev nvme0n1, sector 118200320 op 0x3:(DISCARD) flags 0x0 phys_seg 50 prio class 0
kernel: blk_update_request: I/O error, dev nvme1n1, sector 118326272 op 0x3:(DISCARD) flags 0x0 phys_seg 165 prio class 0
```

-- 
Oleksandr Natalenko (post-factum)



WARNING: multiple messages have this Message-ID (diff)
From: Oleksandr Natalenko <oleksandr@natalenko.name>
To: Ming Lei <ming.lei@redhat.com>
Cc: linux-kernel@vger.kernel.org, Jens Axboe <axboe@fb.com>,
	Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
	linux-nvme@lists.infradead.org,
	David Jeffery <djeffery@redhat.com>,
	Laurence Oberman <loberman@redhat.com>,
	Paolo Valente <paolo.valente@linaro.org>, Jan Kara <jack@suse.cz>,
	Sasha Levin <sashal@kernel.org>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Keith Busch <kbusch@kernel.org>
Subject: Re: New warning in nvme_setup_discard
Date: Wed, 28 Jul 2021 15:44:06 +0200	[thread overview]
Message-ID: <3180854.nXyytZ0Y3r@natalenko.name> (raw)
In-Reply-To: <YQAtL5i0pjlnBpHV@T590>

Hello.

On úterý 27. července 2021 17:58:39 CEST Ming Lei wrote:
> BTW, can you test the following patch? which is another approach on the same
> issue with other benefits.
> 
> From c853e7ed05a75f631da5b7952b9a989983437819 Mon Sep 17 00:00:00 2001
> From: Ming Lei <ming.lei@redhat.com>
> Date: Mon, 7 Jun 2021 16:03:51 +0800
> Subject: [PATCH 2/2] block: support bio merge for multi-range discard
> 
> So far multi-range discard treats each bio as one segment (range) of a
> single discard request. This approach becomes inefficient if lots of
> small-sized discard bios are submitted; one example is raid456.
> 
> Support bio merge for multi-range discard for improving lots of small
> sized discard bios.
> 
> Turns out it is easy to support it:
> 
> 1) always try to merge bio first
> 
> 2) run into multi-range discard only if bio merge can't be done
> 
> 3) add rq_for_each_discard_range() for retrieving each range(segment)
> of discard request
> 
> Reported-by: Wang Shanker <shankerwangmiao@gmail.com>
> Signed-off-by: Ming Lei <ming.lei@redhat.com>
> ---
>  block/blk-merge.c          | 12 ++++-----
>  drivers/block/virtio_blk.c |  9 ++++---
>  drivers/nvme/host/core.c   |  8 +++---
>  include/linux/blkdev.h     | 51 ++++++++++++++++++++++++++++++++++++++
>  4 files changed, 66 insertions(+), 14 deletions(-)
> 
> diff --git a/block/blk-merge.c b/block/blk-merge.c
> index bcdff1879c34..65210e9a8efa 100644
> --- a/block/blk-merge.c
> +++ b/block/blk-merge.c
> @@ -724,10 +724,10 @@ static inline bool blk_discard_mergable(struct request
> *req) static enum elv_merge blk_try_req_merge(struct request *req,
>  					struct request *next)
>  {
> -	if (blk_discard_mergable(req))
> -		return ELEVATOR_DISCARD_MERGE;
> -	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
> +	if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
>  		return ELEVATOR_BACK_MERGE;
> +	else if (blk_discard_mergable(req))
> +		return ELEVATOR_DISCARD_MERGE;
> 
>  	return ELEVATOR_NO_MERGE;
>  }
> @@ -908,12 +908,12 @@ bool blk_rq_merge_ok(struct request *rq, struct bio
> *bio)
> 
>  enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
>  {
> -	if (blk_discard_mergable(rq))
> -		return ELEVATOR_DISCARD_MERGE;
> -	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
> +	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
>  		return ELEVATOR_BACK_MERGE;
>  	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
>  		return ELEVATOR_FRONT_MERGE;
> +	else if (blk_discard_mergable(rq))
> +		return ELEVATOR_DISCARD_MERGE;
>  	return ELEVATOR_NO_MERGE;
>  }
> 
> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
> index b9fa3ef5b57c..970cb0d8acaa 100644
> --- a/drivers/block/virtio_blk.c
> +++ b/drivers/block/virtio_blk.c
> @@ -116,7 +116,6 @@ static int virtblk_setup_discard_write_zeroes(struct
> request *req, bool unmap) unsigned short segments =
> blk_rq_nr_discard_segments(req);
>  	unsigned short n = 0;
>  	struct virtio_blk_discard_write_zeroes *range;
> -	struct bio *bio;
>  	u32 flags = 0;
> 
>  	if (unmap)
> @@ -138,9 +137,11 @@ static int virtblk_setup_discard_write_zeroes(struct
> request *req, bool unmap) range[0].sector = cpu_to_le64(blk_rq_pos(req));
>  		n = 1;
>  	} else {
> -		__rq_for_each_bio(bio, req) {
> -			u64 sector = bio->bi_iter.bi_sector;
> -			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
> +		struct req_discard_range r;
> +
> +		rq_for_each_discard_range(r, req) {
> +			u64 sector = r.sector;
> +			u32 num_sectors = r.size >> SECTOR_SHIFT;
> 
>  			range[n].flags = cpu_to_le32(flags);
>  			range[n].num_sectors = cpu_to_le32(num_sectors);
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index 24bcae88587a..4b0a39360ce9 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -813,7 +813,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns
> *ns, struct request *req, {
>  	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
>  	struct nvme_dsm_range *range;
> -	struct bio *bio;
> +	struct req_discard_range r;
> 
>  	/*
>  	 * Some devices do not consider the DSM 'Number of Ranges' field when
> @@ -835,9 +835,9 @@ static blk_status_t nvme_setup_discard(struct nvme_ns
> *ns, struct request *req, range = page_address(ns->ctrl->discard_page);
>  	}
> 
> -	__rq_for_each_bio(bio, req) {
> -		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
> -		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
> +	rq_for_each_discard_range(r, req) {
> +		u64 slba = nvme_sect_to_lba(ns, r.sector);
> +		u32 nlb = r.size >> ns->lba_shift;
> 
>  		if (n < segments) {
>  			range[n].cattr = cpu_to_le32(0);
> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
> index d66d0da72529..bd9d22269a7b 100644
> --- a/include/linux/blkdev.h
> +++ b/include/linux/blkdev.h
> @@ -1007,6 +1007,57 @@ static inline unsigned int blk_rq_stats_sectors(const
> struct request *rq) return rq->stats_sectors;
>  }
> 
> +struct req_discard_range {
> +	sector_t	sector;
> +	unsigned int	size;
> +
> +	/*
> +	 * internal field: drivers don't use it; it always points to the
> +	 * next bio to be processed
> +	 */
> +	struct bio *__bio;
> +};
> +
> +static inline void req_init_discard_range_iter(const struct request *rq,
> +		struct req_discard_range *range)
> +{
> +	range->__bio = rq->bio;
> +}
> +
> +/* return true if @range stores one valid discard range */
> +static inline bool req_get_discard_range(struct req_discard_range *range)
> +{
> +	struct bio *bio;
> +
> +	if (!range->__bio)
> +		return false;
> +
> +	bio = range->__bio;
> +	range->sector = bio->bi_iter.bi_sector;
> +	range->size = bio->bi_iter.bi_size;
> +	range->__bio = bio->bi_next;
> +
> +	while (range->__bio) {
> +		struct bio *bio = range->__bio;
> +
> +		if (range->sector + (range->size >> SECTOR_SHIFT) !=
> +				bio->bi_iter.bi_sector)
> +			break;
> +
> +		/*
> +		 * ->size won't overflow because req->__data_len is defined
> +		 *  as 'unsigned int'
> +		 */
> +		range->size += bio->bi_iter.bi_size;
> +		range->__bio = bio->bi_next;
> +	}
> +	return true;
> +}
> +
> +#define rq_for_each_discard_range(range, rq) \
> +	for (req_init_discard_range_iter((rq), &range); \
> +			req_get_discard_range(&range);)
> +
>  #ifdef CONFIG_BLK_DEV_ZONED
> 
>  /* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */

Do I have to revert the previous one and apply this one? If so, with this one the issue is triggered pretty quickly:

```
kernel: ------------[ cut here ]------------
kernel: WARNING: CPU: 20 PID: 490 at drivers/nvme/host/core.c:850 nvme_setup_discard+0x1b9/0x220
…
kernel: CPU: 20 PID: 490 Comm: md0_raid10 Not tainted 5.13.0-pf4 #1
kernel: Hardware name: ASUS System Product Name/Pro WS X570-ACE, BIOS 3601 05/26/2021
kernel: RIP: 0010:nvme_setup_discard+0x1b9/0x220
kernel: Code: 38 4c 8b 88 40 0b 00 00 4c 2b 0d f2 06 d8 00 49 c1 f9 06 49 c1 e1 0c 4c 03 0d f3 06 d8 00 4d 89 c8 48 85 d2 0f 85 9f fe ff ff <0f> 0b b8 00 00 00 80 4c 01 c8 72 52 48 c7 c2 00 00 00 80 48 2b 15
kernel: RSP: 0018:ffffa3a34152ba10 EFLAGS: 00010202
kernel: RAX: ffff8b78d80db0c0 RBX: 000000000000000f RCX: 0000000000000400
kernel: RDX: 0000000000000000 RSI: 00000000241b5c00 RDI: 000000000000000d
kernel: RBP: ffff8b78cbd70380 R08: ffff8b78d80db000 R09: ffff8b78d80db000
kernel: R10: 00000000241b5c00 R11: 0000000000000000 R12: ffff8b78c5a4b800
kernel: R13: ffff8b78cbd704c8 R14: ffff8b78c5bd8000 R15: ffff8b78cabbf000
kernel: FS:  0000000000000000(0000) GS:ffff8b7fcef00000(0000) knlGS:0000000000000000
kernel: CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
kernel: CR2: 00007faaaf746020 CR3: 00000001e0342000 CR4: 0000000000350ee0
kernel: Call Trace:
kernel:  nvme_setup_cmd+0x2d0/0x670
kernel:  nvme_queue_rq+0x79/0xc90
kernel:  ? __sbitmap_get_word+0x30/0x80
kernel:  ? sbitmap_get+0x85/0x180
kernel:  blk_mq_dispatch_rq_list+0x15c/0x810
kernel:  __blk_mq_do_dispatch_sched+0xca/0x320
kernel:  ? ktime_get+0x38/0xa0
kernel:  __blk_mq_sched_dispatch_requests+0x14d/0x190
kernel:  blk_mq_sched_dispatch_requests+0x2f/0x60
kernel:  __blk_mq_run_hw_queue+0x30/0xa0
kernel:  __blk_mq_delay_run_hw_queue+0x142/0x170
kernel:  blk_mq_sched_insert_requests+0x6d/0xf0
kernel:  blk_mq_flush_plug_list+0x111/0x1c0
kernel:  blk_finish_plug+0x21/0x30
kernel:  raid10d+0x7c8/0x1960 [raid10]
kernel:  ? psi_task_switch+0xf2/0x330
kernel:  ? __switch_to_asm+0x42/0x70
kernel:  ? finish_task_switch.isra.0+0xaa/0x290
kernel:  ? md_thread+0xc3/0x190 [md_mod]
kernel:  md_thread+0xc3/0x190 [md_mod]
kernel:  ? finish_wait+0x80/0x80
kernel:  ? md_rdev_init+0xb0/0xb0 [md_mod]
kernel:  kthread+0x1b3/0x1e0
kernel:  ? __kthread_init_worker+0x50/0x50
kernel:  ret_from_fork+0x22/0x30
kernel: ---[ end trace dc148fcea235e799 ]---
kernel: blk_update_request: I/O error, dev nvme0n1, sector 605615104 op 0x3:(DISCARD) flags 0x0 phys_seg 15 prio class 0
kernel: blk_update_request: I/O error, dev nvme1n1, sector 118159360 op 0x3:(DISCARD) flags 0x0 phys_seg 15 prio class 0
kernel: blk_update_request: I/O error, dev nvme0n1, sector 118200320 op 0x3:(DISCARD) flags 0x0 phys_seg 50 prio class 0
kernel: blk_update_request: I/O error, dev nvme1n1, sector 118326272 op 0x3:(DISCARD) flags 0x0 phys_seg 165 prio class 0
```

-- 
Oleksandr Natalenko (post-factum)



_______________________________________________
Linux-nvme mailing list
Linux-nvme@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-nvme

  reply	other threads:[~2021-07-28 13:44 UTC|newest]

Thread overview: 52+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-07-15 13:56 New warning in nvme_setup_discard Oleksandr Natalenko
2021-07-15 13:56 ` Oleksandr Natalenko
2021-07-15 14:19 ` Greg Kroah-Hartman
2021-07-15 14:19   ` Greg Kroah-Hartman
2021-07-15 14:21   ` Oleksandr Natalenko
2021-07-15 14:21     ` Oleksandr Natalenko
2021-07-15 21:37   ` Laurence Oberman
2021-07-15 21:37     ` Laurence Oberman
2021-07-16  5:50     ` Oleksandr Natalenko
2021-07-16  5:50       ` Oleksandr Natalenko
2021-07-16  2:16 ` Ming Lei
2021-07-16  2:16   ` Ming Lei
2021-07-16  5:53   ` Oleksandr Natalenko
2021-07-16  5:53     ` Oleksandr Natalenko
2021-07-16  9:33     ` Ming Lei
2021-07-16  9:33       ` Ming Lei
2021-07-16 10:03       ` Oleksandr Natalenko
2021-07-16 10:03         ` Oleksandr Natalenko
2021-07-16 10:41         ` Ming Lei
2021-07-16 10:41           ` Ming Lei
2021-07-16 12:56           ` Oleksandr Natalenko
2021-07-16 12:56             ` Oleksandr Natalenko
2021-07-17  9:35             ` Ming Lei
2021-07-17  9:35               ` Ming Lei
2021-07-17 12:11               ` Oleksandr Natalenko
2021-07-17 12:11                 ` Oleksandr Natalenko
2021-07-17 12:19                 ` Oleksandr Natalenko
2021-07-17 12:19                   ` Oleksandr Natalenko
2021-07-17 12:35                   ` Oleksandr Natalenko
2021-07-17 12:35                     ` Oleksandr Natalenko
2021-07-19  1:40                     ` Ming Lei
2021-07-19  1:40                       ` Ming Lei
2021-07-19  6:27                       ` Oleksandr Natalenko
2021-07-19  6:27                         ` Oleksandr Natalenko
2021-07-20  9:05                         ` Oleksandr Natalenko
2021-07-20  9:05                           ` Oleksandr Natalenko
2021-07-21  8:00                           ` Ming Lei
2021-07-21  8:00                             ` Ming Lei
2021-07-27 15:12                             ` Oleksandr Natalenko
2021-07-27 15:12                               ` Oleksandr Natalenko
2021-07-27 15:58                               ` Ming Lei
2021-07-27 15:58                                 ` Ming Lei
2021-07-28 13:44                                 ` Oleksandr Natalenko [this message]
2021-07-28 13:44                                   ` Oleksandr Natalenko
2021-07-28 15:53                                   ` Ming Lei
2021-07-28 15:53                                     ` Ming Lei
2021-07-28 16:38                                     ` Oleksandr Natalenko
2021-07-28 16:38                                       ` Oleksandr Natalenko
2021-07-29  3:33                                       ` Ming Lei
2021-07-29  3:33                                         ` Ming Lei
2021-07-29  9:29                                         ` Ming Lei
2021-07-29  9:29                                           ` Ming Lei

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=3180854.nXyytZ0Y3r@natalenko.name \
    --to=oleksandr@natalenko.name \
    --cc=axboe@fb.com \
    --cc=djeffery@redhat.com \
    --cc=gregkh@linuxfoundation.org \
    --cc=hch@lst.de \
    --cc=jack@suse.cz \
    --cc=kbusch@kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-nvme@lists.infradead.org \
    --cc=loberman@redhat.com \
    --cc=ming.lei@redhat.com \
    --cc=paolo.valente@linaro.org \
    --cc=sagi@grimberg.me \
    --cc=sashal@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.