* [PATCH V3 1/1] md/raid0: Add mddev->io_acct_cnt for raid0_quiesce
@ 2023-02-01 12:46 Xiao Ni
  2023-02-01 18:00 ` Song Liu
  0 siblings, 1 reply; 6+ messages in thread
From: Xiao Ni @ 2023-02-01 12:46 UTC (permalink / raw)
  To: song; +Cc: linux-raid, ming.lei, ncroxon, heinzm

io_acct_set was added for raid0/raid5 io accounting, and md_io_acct
structures need to be allocated in the i/o path. They are freed when
the bios come back from the member disks. Right now there is no way
to monitor whether all of those bios have come back. In the takeover
process, the raid0 memory resources need to be freed, including the
memory pool for md_io_acct, but some bios may still be outstanding.
When those bios return, they can cause a panic due to a NULL pointer
dereference or an invalid address. Something like this:

[ 6973.767999] RIP: 0010:mempool_free+0x52/0x80
[ 6973.786098] Call Trace:
[ 6973.786549]  md_end_io_acct+0x31/0x40
[ 6973.787227]  blk_update_request+0x224/0x380
[ 6973.787994]  blk_mq_end_request+0x1a/0x130
[ 6973.788739]  blk_complete_reqs+0x35/0x50
[ 6973.789456]  __do_softirq+0xd7/0x2c8
[ 6973.790114]  ? sort_range+0x20/0x20
[ 6973.790763]  run_ksoftirqd+0x2a/0x40
[ 6973.791400]  smpboot_thread_fn+0xb5/0x150
[ 6973.792114]  kthread+0x10b/0x130
[ 6973.792724]  ? set_kthread_struct+0x50/0x50
[ 6973.793491]  ret_from_fork+0x1f/0x40

This patch adds io_acct_cnt so that, when stopping raid0, md can use
it to wait until all those bios have come back. A simple performance
test with fio shows no noticeable impact:

-direct=1 -ioengine=libaio -iodepth=128 -bs=64K -rw=write -numjobs=1
With the patch set: 2676MB/s, without the patch set: 2670MB/s
-direct=1 -ioengine=libaio -iodepth=128 -bs=64K -rw=read -numjobs=1
With the patch set: 4676MB/s, without the patch set: 4654MB/s

Reported-by: Fine Fan <ffan@redhat.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Signed-off-by: Xiao Ni <xni@redhat.com>
---
v2: Fix a bug: check whether io_acct_cnt is in the dying state before
resurrecting it.
v3: Add the call trace to the commit log.
 drivers/md/md.c    | 23 ++++++++++++++++++++++-
 drivers/md/md.h    |  9 ++++++---
 drivers/md/raid0.c |  7 +++++++
 3 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0eb31bef1f01..66c3639bdbfd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -683,6 +683,7 @@ void mddev_init(struct mddev *mddev)
 	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);
 	init_waitqueue_head(&mddev->recovery_wait);
+	init_waitqueue_head(&mddev->wait_io_acct);
 	mddev->reshape_position = MaxSector;
 	mddev->reshape_backwards = 0;
 	mddev->last_sync_action = "none";
@@ -8604,13 +8605,28 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
 }
 EXPORT_SYMBOL_GPL(md_submit_discard_bio);
 
+static void io_acct_release(struct percpu_ref *ref)
+{
+	struct mddev *mddev = container_of(ref, struct mddev, io_acct_cnt);
+
+	wake_up(&mddev->wait_io_acct);
+}
+
 int acct_bioset_init(struct mddev *mddev)
 {
 	int err = 0;
 
-	if (!bioset_initialized(&mddev->io_acct_set))
+	if (!bioset_initialized(&mddev->io_acct_set)) {
+		err = percpu_ref_init(&mddev->io_acct_cnt, io_acct_release,
+			PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+		if (err)
+			return err;
+
 		err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
 			offsetof(struct md_io_acct, bio_clone), 0);
+		if (err)
+			percpu_ref_exit(&mddev->io_acct_cnt);
+	}
 	return err;
 }
 EXPORT_SYMBOL_GPL(acct_bioset_init);
@@ -8618,6 +8634,7 @@ EXPORT_SYMBOL_GPL(acct_bioset_init);
 void acct_bioset_exit(struct mddev *mddev)
 {
 	bioset_exit(&mddev->io_acct_set);
+	percpu_ref_exit(&mddev->io_acct_cnt);
 }
 EXPORT_SYMBOL_GPL(acct_bioset_exit);
 
@@ -8625,9 +8642,11 @@ static void md_end_io_acct(struct bio *bio)
 {
 	struct md_io_acct *md_io_acct = bio->bi_private;
 	struct bio *orig_bio = md_io_acct->orig_bio;
+	struct mddev *mddev = md_io_acct->mddev;
 
 	orig_bio->bi_status = bio->bi_status;
 
+	percpu_ref_put(&mddev->io_acct_cnt);
 	bio_end_io_acct(orig_bio, md_io_acct->start_time);
 	bio_put(bio);
 	bio_endio(orig_bio);
@@ -8650,6 +8669,8 @@ void md_account_bio(struct mddev *mddev, struct bio **bio)
 	md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
 	md_io_acct->orig_bio = *bio;
 	md_io_acct->start_time = bio_start_io_acct(*bio);
+	md_io_acct->mddev = mddev;
+	percpu_ref_get(&mddev->io_acct_cnt);
 
 	clone->bi_end_io = md_end_io_acct;
 	clone->bi_private = md_io_acct;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 6335cb86e52e..c0e869bdde42 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -513,6 +513,8 @@ struct mddev {
 						   * metadata and bitmap writes
 						   */
 	struct bio_set			io_acct_set; /* for raid0 and raid5 io accounting */
+	struct percpu_ref		io_acct_cnt;
+	wait_queue_head_t		wait_io_acct;
 
 	/* Generic flush handling.
 	 * The last to finish preflush schedules a worker to submit
@@ -710,9 +712,10 @@ struct md_thread {
 };
 
 struct md_io_acct {
-	struct bio *orig_bio;
-	unsigned long start_time;
-	struct bio bio_clone;
+	struct mddev	*mddev;
+	struct bio	*orig_bio;
+	unsigned long	start_time;
+	struct bio	bio_clone;
 };
 
 #define THREAD_WAKEUP  0
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b536befd8898..d8e9ed139bc0 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -753,6 +753,13 @@ static void *raid0_takeover(struct mddev *mddev)
 
 static void raid0_quiesce(struct mddev *mddev, int quiesce)
 {
+	if (quiesce) {
+		percpu_ref_kill(&mddev->io_acct_cnt);
+		wait_event(mddev->wait_io_acct,
+			percpu_ref_is_zero(&mddev->io_acct_cnt));
+	} else
+		if (percpu_ref_is_dying(&mddev->io_acct_cnt))
+			percpu_ref_resurrect(&mddev->io_acct_cnt);
 }
 
 static struct md_personality raid0_personality=
-- 
2.32.0 (Apple Git-132)


* Re: [PATCH V3 1/1] md/raid0: Add mddev->io_acct_cnt for raid0_quiesce
  2023-02-01 12:46 [PATCH V3 1/1] md/raid0: Add mddev->io_acct_cnt for raid0_quiesce Xiao Ni
@ 2023-02-01 18:00 ` Song Liu
  2023-02-02  0:23   ` Xiao Ni
  0 siblings, 1 reply; 6+ messages in thread
From: Song Liu @ 2023-02-01 18:00 UTC (permalink / raw)
  To: Xiao Ni; +Cc: linux-raid, ming.lei, ncroxon, heinzm

On Wed, Feb 1, 2023 at 4:46 AM Xiao Ni <xni@redhat.com> wrote:
>
> io_acct_set was added for raid0/raid5 io accounting, and md_io_acct
> structures need to be allocated in the i/o path. They are freed when
> the bios come back from the member disks. Right now there is no way
> to monitor whether all of those bios have come back. In the takeover
> process, the raid0 memory resources need to be freed, including the
> memory pool for md_io_acct, but some bios may still be outstanding.
> When those bios return, they can cause a panic due to a NULL pointer
> dereference or an invalid address. Something like this:

Can we use mddev->active_io for this? If not, please explain the reason
in the comments (in the code).

[...]

> +       } else

Please add { } for the else clause.

Thanks,
Song

> +               if (percpu_ref_is_dying(&mddev->io_acct_cnt))
> +                       percpu_ref_resurrect(&mddev->io_acct_cnt);
>  }
>
>  static struct md_personality raid0_personality=
> --
> 2.32.0 (Apple Git-132)
>

* Re: [PATCH V3 1/1] md/raid0: Add mddev->io_acct_cnt for raid0_quiesce
  2023-02-01 18:00 ` Song Liu
@ 2023-02-02  0:23   ` Xiao Ni
  2023-02-02  0:40     ` Xiao Ni
  0 siblings, 1 reply; 6+ messages in thread
From: Xiao Ni @ 2023-02-02  0:23 UTC (permalink / raw)
  To: Song Liu; +Cc: linux-raid, ming.lei, ncroxon, heinzm

On Thu, Feb 2, 2023 at 2:00 AM Song Liu <song@kernel.org> wrote:
>
> On Wed, Feb 1, 2023 at 4:46 AM Xiao Ni <xni@redhat.com> wrote:
> >
> > io_acct_set was added for raid0/raid5 io accounting, and md_io_acct
> > structures need to be allocated in the i/o path. They are freed when
> > the bios come back from the member disks. Right now there is no way
> > to monitor whether all of those bios have come back. In the takeover
> > process, the raid0 memory resources need to be freed, including the
> > memory pool for md_io_acct, but some bios may still be outstanding.
> > When those bios return, they can cause a panic due to a NULL pointer
> > dereference or an invalid address. Something like this:
>
> Can we use mddev->active_io for this? If not, please explain the reason
> in the comments (in the code).

Hi Song

At first, we thought about this. Right now ->active_io is used to wait
for all submitting processes to exit. If we used ->active_io to count
acct_bio, it would change the usage of ->active_io. In mddev_suspend,
it first waits for all submitting processes to finish, then it calls
->quiesce to wait for all in-flight io to come back. For raid0, it's
ok to use ->active_io to count acct_bio. But for raid5, I'm not sure
if it's ok. What's your opinion?
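
To be specific, the suspend flow looks roughly like this (a simplified
sketch from memory, locking and error handling elided, not the exact
upstream code):

void mddev_suspend(struct mddev *mddev)
{
	if (mddev->suspended++)
		return;
	wake_up(&mddev->sb_wait);
	/* 1) wait until all submitting processes have exited */
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	/* 2) then ask the personality to drain in-flight member-disk io */
	mddev->pers->quiesce(mddev, 1);
}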

>
> [...]
>
> > +       } else
>
> Please add { } for the else clause.

ok

Regards
Xiao
>
> Thanks,
> Song
>
> > +               if (percpu_ref_is_dying(&mddev->io_acct_cnt))
> > +                       percpu_ref_resurrect(&mddev->io_acct_cnt);
> >  }
> >
> >  static struct md_personality raid0_personality=
> > --
> > 2.32.0 (Apple Git-132)
> >
>


* Re: [PATCH V3 1/1] md/raid0: Add mddev->io_acct_cnt for raid0_quiesce
  2023-02-02  0:23   ` Xiao Ni
@ 2023-02-02  0:40     ` Xiao Ni
  2023-02-02  6:57       ` Song Liu
  0 siblings, 1 reply; 6+ messages in thread
From: Xiao Ni @ 2023-02-02  0:40 UTC (permalink / raw)
  To: Song Liu; +Cc: linux-raid, ming.lei, ncroxon, heinzm

On Thu, Feb 2, 2023 at 8:23 AM Xiao Ni <xni@redhat.com> wrote:
>
> On Thu, Feb 2, 2023 at 2:00 AM Song Liu <song@kernel.org> wrote:
> >
> > On Wed, Feb 1, 2023 at 4:46 AM Xiao Ni <xni@redhat.com> wrote:
> > >
> > > io_acct_set was added for raid0/raid5 io accounting, and md_io_acct
> > > structures need to be allocated in the i/o path. They are freed when
> > > the bios come back from the member disks. Right now there is no way
> > > to monitor whether all of those bios have come back. In the takeover
> > > process, the raid0 memory resources need to be freed, including the
> > > memory pool for md_io_acct, but some bios may still be outstanding.
> > > When those bios return, they can cause a panic due to a NULL pointer
> > > dereference or an invalid address. Something like this:
> >
> > Can we use mddev->active_io for this? If not, please explain the reason
> > in the comments (in the code).
>
> Hi Song
>
> At first, we thought about this. Right now ->active_io is used to wait
> for all submitting processes to exit. If we used ->active_io to count
> acct_bio, it would change the usage of ->active_io. In mddev_suspend,
> it first waits for all submitting processes to finish, then it calls
> ->quiesce to wait for all in-flight io to come back. For raid0, it's
> ok to use ->active_io to count acct_bio. But for raid5, I'm not sure
> if it's ok. What's your opinion?

Hi Song

I've sent V4. If you think ->active_io is a better way to count acct_io,
I'll rewrite the patch to use ->active_io.

Regards
Xiao
>
> >
> > [...]
> >
> > > +       } else
> >
> > Please add { } for the else clause.
>
> ok
>
> Regards
> Xiao
> >
> > Thanks,
> > Song
> >
> > > +               if (percpu_ref_is_dying(&mddev->io_acct_cnt))
> > > +                       percpu_ref_resurrect(&mddev->io_acct_cnt);
> > >  }
> > >
> > >  static struct md_personality raid0_personality=
> > > --
> > > 2.32.0 (Apple Git-132)
> > >
> >


* Re: [PATCH V3 1/1] md/raid0: Add mddev->io_acct_cnt for raid0_quiesce
  2023-02-02  0:40     ` Xiao Ni
@ 2023-02-02  6:57       ` Song Liu
  2023-02-02 13:52         ` Xiao Ni
  0 siblings, 1 reply; 6+ messages in thread
From: Song Liu @ 2023-02-02  6:57 UTC (permalink / raw)
  To: Xiao Ni; +Cc: linux-raid, ming.lei, ncroxon, heinzm

On Wed, Feb 1, 2023 at 4:41 PM Xiao Ni <xni@redhat.com> wrote:
>
> On Thu, Feb 2, 2023 at 8:23 AM Xiao Ni <xni@redhat.com> wrote:
> >
> > On Thu, Feb 2, 2023 at 2:00 AM Song Liu <song@kernel.org> wrote:
> > >
> > > On Wed, Feb 1, 2023 at 4:46 AM Xiao Ni <xni@redhat.com> wrote:
> > > >
> > > > io_acct_set was added for raid0/raid5 io accounting, and md_io_acct
> > > > structures need to be allocated in the i/o path. They are freed when
> > > > the bios come back from the member disks. Right now there is no way
> > > > to monitor whether all of those bios have come back. In the takeover
> > > > process, the raid0 memory resources need to be freed, including the
> > > > memory pool for md_io_acct, but some bios may still be outstanding.
> > > > When those bios return, they can cause a panic due to a NULL pointer
> > > > dereference or an invalid address. Something like this:
> > >
> > > Can we use mddev->active_io for this? If not, please explain the reason
> > > in the comments (in the code).
> >
> > Hi Song
> >
> > At first, we thought about this. Right now ->active_io is used to wait
> > for all submitting processes to exit. If we used ->active_io to count
> > acct_bio, it would change the usage of ->active_io. In mddev_suspend,
> > it first waits for all submitting processes to finish, then it calls
> > ->quiesce to wait for all in-flight io to come back. For raid0, it's
> > ok to use ->active_io to count acct_bio. But for raid5, I'm not sure
> > if it's ok. What's your opinion?
>
> Hi Song
>
> I've sent V4. If you think ->active_io is a better way to count acct_io,
> I'll rewrite the patch to use ->active_io.

I haven't thought about all the details. But we should try very hard to
avoid adding another percpu_ref. So let's try to use active_io to count
acct_io.
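
Something along these lines, maybe (an untested sketch just to
illustrate the idea, not a tested implementation):

	/* in md_account_bio(): take one extra active_io reference
	 * for each cloned acct bio
	 */
	atomic_inc(&mddev->active_io);

	/* in md_end_io_acct(): drop it and wake up a waiting suspend */
	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);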

Thanks,
Song

* Re: [PATCH V3 1/1] md/raid0: Add mddev->io_acct_cnt for raid0_quiesce
  2023-02-02  6:57       ` Song Liu
@ 2023-02-02 13:52         ` Xiao Ni
  0 siblings, 0 replies; 6+ messages in thread
From: Xiao Ni @ 2023-02-02 13:52 UTC (permalink / raw)
  To: Song Liu; +Cc: linux-raid, ming.lei, ncroxon, heinzm

On Thu, Feb 2, 2023 at 2:58 PM Song Liu <song@kernel.org> wrote:
>
> On Wed, Feb 1, 2023 at 4:41 PM Xiao Ni <xni@redhat.com> wrote:
> >
> > On Thu, Feb 2, 2023 at 8:23 AM Xiao Ni <xni@redhat.com> wrote:
> > >
> > > On Thu, Feb 2, 2023 at 2:00 AM Song Liu <song@kernel.org> wrote:
> > > >
> > > > On Wed, Feb 1, 2023 at 4:46 AM Xiao Ni <xni@redhat.com> wrote:
> > > > >
> > > > > io_acct_set was added for raid0/raid5 io accounting, and md_io_acct
> > > > > structures need to be allocated in the i/o path. They are freed when
> > > > > the bios come back from the member disks. Right now there is no way
> > > > > to monitor whether all of those bios have come back. In the takeover
> > > > > process, the raid0 memory resources need to be freed, including the
> > > > > memory pool for md_io_acct, but some bios may still be outstanding.
> > > > > When those bios return, they can cause a panic due to a NULL pointer
> > > > > dereference or an invalid address. Something like this:
> > > >
> > > > Can we use mddev->active_io for this? If not, please explain the reason
> > > > in the comments (in the code).
> > >
> > > Hi Song
> > >
> > > At first, we thought about this. Right now ->active_io is used to wait
> > > for all submitting processes to exit. If we used ->active_io to count
> > > acct_bio, it would change the usage of ->active_io. In mddev_suspend,
> > > it first waits for all submitting processes to finish, then it calls
> > > ->quiesce to wait for all in-flight io to come back. For raid0, it's
> > > ok to use ->active_io to count acct_bio. But for raid5, I'm not sure
> > > if it's ok. What's your opinion?
> >
> > Hi Song
> >
> > I've sent V4. If you think ->active_io is a better way to count acct_io,
> > I'll rewrite the patch to use ->active_io.
>
> I haven't thought about all the details. But we should try very hard to
> avoid adding another percpu_ref. So let's try to use active_io to count
> acct_io.
>
> Thanks,
> Song
>

Ok, I'll try to send a version using active_io.

Regards
Xiao

