From: Brian Foster <bfoster@redhat.com>
To: Dave Chinner <david@fromorbit.com>
Cc: xfs@oss.sgi.com
Subject: Re: [PATCH 05/10] repair: factor out threading setup code
Date: Mon, 24 Feb 2014 15:43:05 -0500
Message-ID: <20140224204304.GB49654@bfoster.bfoster>
In-Reply-To: <1393223369-4696-6-git-send-email-david@fromorbit.com>

On Mon, Feb 24, 2014 at 05:29:24PM +1100, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
> 
> The same code is repeated in different places to set up
> multithreaded prefetching. This can all be factored into a single
> implementation.
> 
> Signed-off-by: Dave Chinner <dchinner@redhat.com>
> ---
>  repair/dinode.h   | 15 ++++++------
>  repair/phase3.c   | 40 +++----------------------------
>  repair/phase4.c   | 48 +++----------------------------------
>  repair/phase6.c   | 22 ++++-------------
>  repair/prefetch.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  repair/prefetch.h | 10 ++++++++
>  6 files changed, 98 insertions(+), 108 deletions(-)
> 
...
> diff --git a/repair/phase6.c b/repair/phase6.c
> index cdbf4db..08f78d2 100644
> --- a/repair/phase6.c
> +++ b/repair/phase6.c
> @@ -17,6 +17,8 @@
>   */
>  
>  #include <libxfs.h>
> +#include "threads.h"
> +#include "prefetch.h"
>  #include "avl.h"
>  #include "globals.h"
>  #include "agheader.h"
> @@ -25,9 +27,7 @@
>  #include "protos.h"
>  #include "err_protos.h"
>  #include "dinode.h"
> -#include "prefetch.h"
>  #include "progress.h"
> -#include "threads.h"
>  #include "versions.h"
>  
>  static struct cred		zerocr;
> @@ -3031,23 +3031,9 @@ update_missing_dotdot_entries(
>  
>  static void
>  traverse_ags(
> -	xfs_mount_t 		*mp)
> +	struct xfs_mount	*mp)
>  {
> -	int			i;
> -	work_queue_t		queue;
> -	prefetch_args_t		*pf_args[2];
> -
> -	/*
> -	 * we always do prefetch for phase 6 as it will fill in the gaps
> -	 * not read during phase 3 prefetch.
> -	 */
> -	queue.mp = mp;
> -	pf_args[0] = start_inode_prefetch(0, 1, NULL);
> -	for (i = 0; i < glob_agcount; i++) {
> -		pf_args[(~i) & 1] = start_inode_prefetch(i + 1, 1,
> -				pf_args[i & 1]);
> -		traverse_function(&queue, i, pf_args[i & 1]);
> -	}
> +	do_inode_prefetch(mp, 0, traverse_function, true, true);

The cover letter indicates the parallelization of phase 6 was dropped,
but this appears to (conditionally) enable it.
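
To illustrate what I mean, here's my reading of the path that call
takes (condensed from the new do_inode_prefetch() below; error
handling and the stride/serial branches are omitted):

	/* phase 6 now calls: */
	do_inode_prefetch(mp, 0, traverse_function, true, true);

	/* with check_cache == true, that expands to roughly: */
	if (!libxfs_bcache_overflowed()) {
		/* thread per CPU, one queued item per AG */
		create_work_queue(&queue, mp, libxfs_nproc());
		for (i = 0; i < mp->m_sb.sb_agcount; i++)
			queue_work(&queue, traverse_function, i, NULL);
		destroy_work_queue(&queue);
		return;
	}
	/*
	 * stride == 0, so otherwise we fall back to the old
	 * single-threaded prefetch loop.
	 */

So unless the buffer cache overflowed in the earlier phases, phase 6
ends up being processed by multiple worker threads.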

Brian

>  }
>  
>  void
> diff --git a/repair/prefetch.c b/repair/prefetch.c
> index 984beda..e573e35 100644
> --- a/repair/prefetch.c
> +++ b/repair/prefetch.c
> @@ -865,6 +865,77 @@ start_inode_prefetch(
>  	return args;
>  }
>  
> +/*
> + * Do inode prefetch in the most optimal way for the context under which repair
> + * has been run.
> + */
> +void
> +do_inode_prefetch(
> +	struct xfs_mount	*mp,
> +	int			stride,
> +	void			(*func)(struct work_queue *,
> +					xfs_agnumber_t, void *),
> +	bool			check_cache,
> +	bool			dirs_only)
> +{
> +	int			i, j;
> +	xfs_agnumber_t		agno;
> +	struct work_queue	queue;
> +	struct work_queue	*queues;
> +	struct prefetch_args	*pf_args[2];
> +
> +	/*
> +	 * If the previous phases of repair have not overflowed the buffer
> +	 * cache, then we don't need to re-read any of the metadata in the
> +	 * filesystem - it's all in the cache. In that case, run a thread per
> +	 * CPU to maximise parallelism of the queue to be processed.
> +	 */
> +	if (check_cache && !libxfs_bcache_overflowed()) {
> +		queue.mp = mp;
> +		create_work_queue(&queue, mp, libxfs_nproc());
> +		for (i = 0; i < mp->m_sb.sb_agcount; i++)
> +			queue_work(&queue, func, i, NULL);
> +		destroy_work_queue(&queue);
> +		return;
> +	}
> +
> +	/*
> +	 * single threaded behaviour - single prefetch thread, processed
> +	 * directly after each AG is queued.
> +	 */
> +	if (!stride) {
> +		queue.mp = mp;
> +		pf_args[0] = start_inode_prefetch(0, dirs_only, NULL);
> +		for (i = 0; i < mp->m_sb.sb_agcount; i++) {
> +			pf_args[(~i) & 1] = start_inode_prefetch(i + 1,
> +					dirs_only, pf_args[i & 1]);
> +			func(&queue, i, pf_args[i & 1]);
> +		}
> +		return;
> +	}
> +
> +	/*
> +	 * create one worker thread for each segment of the volume
> +	 */
> +	queues = malloc(thread_count * sizeof(work_queue_t));
> +	for (i = 0, agno = 0; i < thread_count; i++) {
> +		create_work_queue(&queues[i], mp, 1);
> +		pf_args[0] = NULL;
> +		for (j = 0; j < stride && agno < mp->m_sb.sb_agcount;
> +				j++, agno++) {
> +			pf_args[0] = start_inode_prefetch(agno, dirs_only,
> +							  pf_args[0]);
> +			queue_work(&queues[i], func, agno, pf_args[0]);
> +		}
> +	}
> +	/*
> +	 * wait for workers to complete
> +	 */
> +	for (i = 0; i < thread_count; i++)
> +		destroy_work_queue(&queues[i]);
> +	free(queues);
> +}
> +
>  void
>  wait_for_inode_prefetch(
>  	prefetch_args_t		*args)
> diff --git a/repair/prefetch.h b/repair/prefetch.h
> index 44a406c..b837752 100644
> --- a/repair/prefetch.h
> +++ b/repair/prefetch.h
> @@ -4,6 +4,7 @@
>  #include <semaphore.h>
>  #include "incore.h"
>  
> +struct work_queue;
>  
>  extern int 	do_prefetch;
>  
> @@ -41,6 +42,15 @@ start_inode_prefetch(
>  	prefetch_args_t		*prev_args);
>  
>  void
> +do_inode_prefetch(
> +	struct xfs_mount	*mp,
> +	int			stride,
> +	void			(*func)(struct work_queue *,
> +					xfs_agnumber_t, void *),
> +	bool			check_cache,
> +	bool			dirs_only);
> +
> +void
>  wait_for_inode_prefetch(
>  	prefetch_args_t		*args);
>  
> -- 
> 1.8.4.rc3
> 
