From: Marcos Paulo de Souza <marcos.souza.org@gmail.com>
To: linux-kernel@vger.kernel.org
Cc: neilb@suse.com, Shaohua Li <shli@kernel.org>,
	"open list:SOFTWARE RAID (Multiple Disks) SUPPORT"
	<linux-raid@vger.kernel.org>
Subject: Re: [PATCH] drivers: md: Unify common definitions of raid1 and raid10
Date: Thu, 16 May 2019 08:23:19 -0300
Message-ID: <20190516112317.GA8611@geeko>
In-Reply-To: <20190509111849.22927-1-marcos.souza.org@gmail.com>

ping.

On Thu, May 09, 2019 at 08:18:49AM -0300, Marcos Paulo de Souza wrote:
> raid1.c and raid10.c carry identical copies of several definitions (the
> NR_RAID{1,10}_BIOS pool size, the IO_BLOCKED/IO_MADE_GOOD bio sentinels,
> BIO_SPECIAL() and max_queued_requests). Move them into raid1-10.c, which
> is included by both files.
> 
> Signed-off-by: Marcos Paulo de Souza <marcos.souza.org@gmail.com>
> ---
>  drivers/md/raid1-10.c | 25 +++++++++++++++++++++++++
>  drivers/md/raid1.c    | 29 ++---------------------------
>  drivers/md/raid10.c   | 27 +--------------------------
>  3 files changed, 28 insertions(+), 53 deletions(-)
> 
> diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
> index 400001b815db..7d968bf08e54 100644
> --- a/drivers/md/raid1-10.c
> +++ b/drivers/md/raid1-10.c
> @@ -3,6 +3,31 @@
>  #define RESYNC_BLOCK_SIZE (64*1024)
>  #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
>  
> +/*
> + * Number of guaranteed raid bios in case of extreme VM load:
> + */
> +#define	NR_RAID_BIOS 256
> +
> +/* When we get a read error on a read-only array, we redirect to another
> + * device without failing the first device, or trying to over-write to
> + * correct the read error.  To keep track of bad blocks on a per-bio
> + * level, we store IO_BLOCKED in the appropriate 'bios' pointer
> + */
> +#define IO_BLOCKED ((struct bio *)1)
> +/* When we successfully write to a known bad-block, we need to remove the
> + * bad-block marking which must be done from process context.  So we record
> + * the success by setting devs[n].bio to IO_MADE_GOOD
> + */
> +#define IO_MADE_GOOD ((struct bio *)2)
> +
> +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
> +
> +/* When there are this many requests queued to be written by
> + * the raid thread, we become 'congested' to provide back-pressure
> + * for writeback.
> + */
> +static int max_queued_requests = 1024;
> +
>  /* for managing resync I/O pages */
>  struct resync_pages {
>  	void		*raid_bio;
> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index 0c8a098d220e..bb052c35bf29 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -50,31 +50,6 @@
>  	 (1L << MD_HAS_PPL) |		\
>  	 (1L << MD_HAS_MULTIPLE_PPLS))
>  
> -/*
> - * Number of guaranteed r1bios in case of extreme VM load:
> - */
> -#define	NR_RAID1_BIOS 256
> -
> -/* when we get a read error on a read-only array, we redirect to another
> - * device without failing the first device, or trying to over-write to
> - * correct the read error.  To keep track of bad blocks on a per-bio
> - * level, we store IO_BLOCKED in the appropriate 'bios' pointer
> - */
> -#define IO_BLOCKED ((struct bio *)1)
> -/* When we successfully write to a known bad-block, we need to remove the
> - * bad-block marking which must be done from process context.  So we record
> - * the success by setting devs[n].bio to IO_MADE_GOOD
> - */
> -#define IO_MADE_GOOD ((struct bio *)2)
> -
> -#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
> -
> -/* When there are this many requests queue to be written by
> - * the raid1 thread, we become 'congested' to provide back-pressure
> - * for writeback.
> - */
> -static int max_queued_requests = 1024;
> -
>  static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
>  static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
>  
> @@ -2955,7 +2930,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
>  	if (!conf->poolinfo)
>  		goto abort;
>  	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
> -	err = mempool_init(&conf->r1bio_pool, NR_RAID1_BIOS, r1bio_pool_alloc,
> +	err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
>  			   r1bio_pool_free, conf->poolinfo);
>  	if (err)
>  		goto abort;
> @@ -3240,7 +3215,7 @@ static int raid1_reshape(struct mddev *mddev)
>  	newpoolinfo->mddev = mddev;
>  	newpoolinfo->raid_disks = raid_disks * 2;
>  
> -	ret = mempool_init(&newpool, NR_RAID1_BIOS, r1bio_pool_alloc,
> +	ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
>  			   r1bio_pool_free, newpoolinfo);
>  	if (ret) {
>  		kfree(newpoolinfo);
> diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
> index 3b6880dd648d..24cb116d950f 100644
> --- a/drivers/md/raid10.c
> +++ b/drivers/md/raid10.c
> @@ -73,31 +73,6 @@
>   *    [B A] [D C]    [B A] [E C D]
>   */
>  
> -/*
> - * Number of guaranteed r10bios in case of extreme VM load:
> - */
> -#define	NR_RAID10_BIOS 256
> -
> -/* when we get a read error on a read-only array, we redirect to another
> - * device without failing the first device, or trying to over-write to
> - * correct the read error.  To keep track of bad blocks on a per-bio
> - * level, we store IO_BLOCKED in the appropriate 'bios' pointer
> - */
> -#define IO_BLOCKED ((struct bio *)1)
> -/* When we successfully write to a known bad-block, we need to remove the
> - * bad-block marking which must be done from process context.  So we record
> - * the success by setting devs[n].bio to IO_MADE_GOOD
> - */
> -#define IO_MADE_GOOD ((struct bio *)2)
> -
> -#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
> -
> -/* When there are this many requests queued to be written by
> - * the raid10 thread, we become 'congested' to provide back-pressure
> - * for writeback.
> - */
> -static int max_queued_requests = 1024;
> -
>  static void allow_barrier(struct r10conf *conf);
>  static void lower_barrier(struct r10conf *conf);
>  static int _enough(struct r10conf *conf, int previous, int ignore);
> @@ -3684,7 +3659,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
>  
>  	conf->geo = geo;
>  	conf->copies = copies;
> -	err = mempool_init(&conf->r10bio_pool, NR_RAID10_BIOS, r10bio_pool_alloc,
> +	err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
>  			   r10bio_pool_free, conf);
>  	if (err)
>  		goto out;
> -- 
> 2.21.0
> 
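
A note for reviewers, since the sentinels above are easy to misread:
IO_BLOCKED and IO_MADE_GOOD are not real bio pointers, just small
integers cast to struct bio *, so anything walking the bios[] arrays
must filter them with BIO_SPECIAL() before dereferencing. A minimal
sketch of the pattern (the helper is made up for illustration, it is
not part of this patch):

	/* Illustrative only: entries in r1bio->bios[] / r10bio->devs[n].bio
	 * may be NULL, IO_BLOCKED, IO_MADE_GOOD or a real bio.  The first
	 * three all satisfy BIO_SPECIAL(), since (unsigned long)NULL == 0,
	 * so a single check rejects every non-dereferenceable value.
	 */
	static inline bool bio_is_usable(struct bio *bio)
	{
		return !BIO_SPECIAL(bio);
	}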
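
Likewise, max_queued_requests is only the shared threshold; each
personality still compares its own count of pending writes against it
to throttle writeback. Roughly like this (a hypothetical sketch, not
the exact raid1/raid10 code):

	/* Hypothetical sketch: report congestion once the number of writes
	 * queued for the raid thread reaches the threshold, so writeback
	 * backs off instead of queueing without bound.
	 */
	static bool raid_write_congested(struct r1conf *conf)
	{
		return conf->pending_count >= max_queued_requests;
	}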

-- 
Thanks,
Marcos

Thread overview: 8+ messages

2019-05-09 11:18 [PATCH] drivers: md: Unify common definitions of raid1 and raid10 Marcos Paulo de Souza
2019-05-16 11:23 ` Marcos Paulo de Souza [this message]
2019-05-16 15:39   ` Song Liu
2019-05-21  5:59     ` Song Liu
