Subject: Re: + mm-swap-add-cluster-lock-v5.patch added to -mm tree
From: Michal Hocko
Date: 2017-01-18  8:37 UTC
  To: akpm
  Cc: ying.huang, aarcange, aaron.lu, ak, borntraeger, corbet,
	dave.hansen, hannes, hillf.zj, hughd, kirill.shutemov, minchan,
	riel, shli, tim.c.chen, vdavydov.dev, mm-commits, linux-mm

On Tue 17-01-17 15:45:39, Andrew Morton wrote:
[...]
> From: "Huang\, Ying" <ying.huang@intel.com>
> Subject: mm-swap-add-cluster-lock-v5

I assume you are going to fold this into the original patch. Do you
think it would make sense to have it in a separate patch along with
the reasoning provided via email?

> Link: http://lkml.kernel.org/r/878tqeuuic.fsf_-_@yhuang-dev.intel.com
> Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
> Cc: Tim Chen <tim.c.chen@linux.intel.com>
> Cc: Aaron Lu <aaron.lu@intel.com>
> Cc: Andi Kleen <ak@linux.intel.com>
> Cc: Andrea Arcangeli <aarcange@redhat.com>
> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> Cc: Dave Hansen <dave.hansen@intel.com>
> Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
> Cc: Huang Ying <ying.huang@intel.com>
> Cc: Hugh Dickins <hughd@google.com>
> Cc: Johannes Weiner <hannes@cmpxchg.org>
> Cc: Jonathan Corbet <corbet@lwn.net>
> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> Cc: Michal Hocko <mhocko@kernel.org>
> Cc: Minchan Kim <minchan@kernel.org>
> Cc: Rik van Riel <riel@redhat.com>
> Cc: Shaohua Li <shli@kernel.org>
> Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
> ---
> 
>  include/linux/swap.h |   19 ++++++++++---------
>  mm/swapfile.c        |   32 ++++++++++++++++----------------
>  2 files changed, 26 insertions(+), 25 deletions(-)
> 
> diff -puN include/linux/swap.h~mm-swap-add-cluster-lock-v5 include/linux/swap.h
> --- a/include/linux/swap.h~mm-swap-add-cluster-lock-v5
> +++ a/include/linux/swap.h
> @@ -176,16 +176,17 @@ enum {
>   * protected by swap_info_struct.lock.
>   */
>  struct swap_cluster_info {
> -	unsigned long data;
> +	spinlock_t lock;	/*
> +				 * Protect swap_cluster_info fields
> +				 * and swap_info_struct->swap_map
> +				 * elements correspond to the swap
> +				 * cluster
> +				 */
> +	unsigned int data:24;
> +	unsigned int flags:8;
>  };
> -#define CLUSTER_COUNT_SHIFT		8
> -#define CLUSTER_FLAG_MASK		((1UL << CLUSTER_COUNT_SHIFT) - 1)
> -#define CLUSTER_COUNT_MASK		(~CLUSTER_FLAG_MASK)
> -#define CLUSTER_FLAG_FREE		1 /* This cluster is free */
> -#define CLUSTER_FLAG_NEXT_NULL		2 /* This cluster has no next cluster */
> -/* cluster lock, protect cluster_info contents and sis->swap_map */
> -#define CLUSTER_FLAG_LOCK_BIT		2
> -#define CLUSTER_FLAG_LOCK		(1 << CLUSTER_FLAG_LOCK_BIT)
> +#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
> +#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
>  
>  /*
>   * We assign a cluster to each CPU, so each CPU can allocate swap entry from
> diff -puN mm/swapfile.c~mm-swap-add-cluster-lock-v5 mm/swapfile.c
> --- a/mm/swapfile.c~mm-swap-add-cluster-lock-v5
> +++ a/mm/swapfile.c
> @@ -200,66 +200,66 @@ static void discard_swap_cluster(struct
>  #define LATENCY_LIMIT		256
>  
>  static inline void cluster_set_flag(struct swap_cluster_info *info,
> -				    unsigned int flag)
> +	unsigned int flag)
>  {
> -	info->data = (info->data & (CLUSTER_COUNT_MASK | CLUSTER_FLAG_LOCK)) |
> -		(flag & ~CLUSTER_FLAG_LOCK);
> +	info->flags = flag;
>  }
>  
>  static inline unsigned int cluster_count(struct swap_cluster_info *info)
>  {
> -	return info->data >> CLUSTER_COUNT_SHIFT;
> +	return info->data;
>  }
>  
>  static inline void cluster_set_count(struct swap_cluster_info *info,
>  				     unsigned int c)
>  {
> -	info->data = (c << CLUSTER_COUNT_SHIFT) | (info->data & CLUSTER_FLAG_MASK);
> +	info->data = c;
>  }
>  
>  static inline void cluster_set_count_flag(struct swap_cluster_info *info,
>  					 unsigned int c, unsigned int f)
>  {
> -	info->data = (info->data & CLUSTER_FLAG_LOCK) |
> -		(c << CLUSTER_COUNT_SHIFT) | (f & ~CLUSTER_FLAG_LOCK);
> +	info->flags = f;
> +	info->data = c;
>  }
>  
>  static inline unsigned int cluster_next(struct swap_cluster_info *info)
>  {
> -	return cluster_count(info);
> +	return info->data;
>  }
>  
>  static inline void cluster_set_next(struct swap_cluster_info *info,
>  				    unsigned int n)
>  {
> -	cluster_set_count(info, n);
> +	info->data = n;
>  }
>  
>  static inline void cluster_set_next_flag(struct swap_cluster_info *info,
>  					 unsigned int n, unsigned int f)
>  {
> -	cluster_set_count_flag(info, n, f);
> +	info->flags = f;
> +	info->data = n;
>  }
>  
>  static inline bool cluster_is_free(struct swap_cluster_info *info)
>  {
> -	return info->data & CLUSTER_FLAG_FREE;
> +	return info->flags & CLUSTER_FLAG_FREE;
>  }
>  
>  static inline bool cluster_is_null(struct swap_cluster_info *info)
>  {
> -	return info->data & CLUSTER_FLAG_NEXT_NULL;
> +	return info->flags & CLUSTER_FLAG_NEXT_NULL;
>  }
>  
>  static inline void cluster_set_null(struct swap_cluster_info *info)
>  {
> -	cluster_set_next_flag(info, 0, CLUSTER_FLAG_NEXT_NULL);
> +	info->flags = CLUSTER_FLAG_NEXT_NULL;
> +	info->data = 0;
>  }
>  
> -/* Protect swap_cluster_info fields and si->swap_map */
>  static inline void __lock_cluster(struct swap_cluster_info *ci)
>  {
> -	bit_spin_lock(CLUSTER_FLAG_LOCK_BIT, &ci->data);
> +	spin_lock(&ci->lock);
>  }
>  
>  static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
> @@ -278,7 +278,7 @@ static inline struct swap_cluster_info *
>  static inline void unlock_cluster(struct swap_cluster_info *ci)
>  {
>  	if (ci)
> -		bit_spin_unlock(CLUSTER_FLAG_LOCK_BIT, &ci->data);
> +		spin_unlock(&ci->lock);
>  }
>  
>  static inline struct swap_cluster_info *lock_cluster_or_swap_info(
> _
> 
> Patches currently in -mm which might be from ying.huang@intel.com are
> 
> mm-swap-fix-kernel-message-in-swap_info_get.patch
> mm-swap-add-cluster-lock.patch
> mm-swap-add-cluster-lock-v5.patch
> mm-swap-split-swap-cache-into-64mb-trunks.patch
> mm-swap-add-cache-for-swap-slots-allocation-fix.patch
> mm-swap-skip-readahead-only-when-swap-slot-cache-is-enabled.patch
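
For reference, a standalone userspace sketch of the data layout change
above (illustrative only; the field names follow the diff, and a pthread
spinlock stands in for the kernel's spinlock_t):

/*
 * Illustrative userspace sketch only, not kernel code: the layout change
 * the quoted diff makes to struct swap_cluster_info.
 */
#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdio.h>

/* Old layout: count, flags and the lock bit packed into a single word. */
struct cluster_old {
	unsigned long data;	/* count << 8 | flags; bit 2 doubled as the lock */
};
#define OLD_COUNT_SHIFT	8
#define OLD_FLAG_MASK	((1UL << OLD_COUNT_SHIFT) - 1)

/* New layout: a dedicated lock plus explicit bitfields. */
struct cluster_new {
	pthread_spinlock_t lock;	/* kernel: spinlock_t, visible to lockdep */
	unsigned int data:24;		/* cluster count, or next-cluster index */
	unsigned int flags:8;		/* CLUSTER_FLAG_FREE / CLUSTER_FLAG_NEXT_NULL */
};

int main(void)
{
	struct cluster_old o = { .data = (3UL << OLD_COUNT_SHIFT) | 1UL };
	struct cluster_new n = { .data = 3, .flags = 1 };

	pthread_spin_init(&n.lock, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&n.lock);	/* kernel: spin_lock(&ci->lock) */
	printf("old: count=%lu flags=%lu size=%zu\n",
	       o.data >> OLD_COUNT_SHIFT, o.data & OLD_FLAG_MASK, sizeof(o));
	printf("new: count=%u flags=%u size=%zu\n",
	       n.data, n.flags, sizeof(n));
	pthread_spin_unlock(&n.lock);

	pthread_spin_destroy(&n.lock);
	return 0;
}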

-- 
Michal Hocko
SUSE Labs


Subject: Re: + mm-swap-add-cluster-lock-v5.patch added to -mm tree
From: Andrew Morton
Date: 2017-01-18 20:23 UTC
  To: Michal Hocko
  Cc: ying.huang, aarcange, aaron.lu, ak, borntraeger, corbet,
	dave.hansen, hannes, hillf.zj, hughd, kirill.shutemov, minchan,
	riel, shli, tim.c.chen, vdavydov.dev, mm-commits, linux-mm

On Wed, 18 Jan 2017 09:37:31 +0100 Michal Hocko <mhocko@kernel.org> wrote:

> On Tue 17-01-17 15:45:39, Andrew Morton wrote:
> [...]
> > From: "Huang\, Ying" <ying.huang@intel.com>
> > Subject: mm-swap-add-cluster-lock-v5
> 
> I assume you are going to fold this into the original patch. Do you
> think it would make sense to have it in a separate patch along with
> the reasoning provided via email?

It should be OK - the v5 changelog (which I shall use for the folded
patch, as usual) has

: Compared with a previous implementation using bit_spin_lock, the
: sequential swap out throughput improved about 3.2%.  Test was done on a
: Xeon E5 v3 system.  The swap device used is a RAM simulated PMEM
: (persistent memory) device.  To test the sequential swapping out, the test
: case created 32 processes, which sequentially allocate and write to the
: anonymous pages until the RAM and part of the swap device is used.
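
A minimal userspace sketch of that style of workload (illustrative only,
not the actual test case; the per-process allocation size is a
placeholder):

/*
 * Illustrative sketch: 32 processes each sequentially touch anonymous
 * memory so that swap-out kicks in once RAM is exhausted.
 */
#include <stddef.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define NPROC		32			/* as in the changelog */
#define CHUNK_BYTES	(256UL << 20)		/* placeholder per-process size */

int main(void)
{
	for (int i = 0; i < NPROC; i++) {
		if (fork() == 0) {
			char *buf = mmap(NULL, CHUNK_BYTES, PROT_READ | PROT_WRITE,
					 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
			if (buf == MAP_FAILED)
				_exit(1);
			/* Sequentially dirty every page; under memory pressure
			 * the older pages get swapped out. */
			for (size_t off = 0; off < CHUNK_BYTES; off += 4096)
				buf[off] = (char)off;
			_exit(0);
		}
	}
	for (int i = 0; i < NPROC; i++)
		wait(NULL);
	return 0;
}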


Subject: Re: + mm-swap-add-cluster-lock-v5.patch added to -mm tree
From: Michal Hocko
Date: 2017-01-18 21:18 UTC
  To: Andrew Morton
  Cc: ying.huang, aarcange, aaron.lu, ak, borntraeger, corbet,
	dave.hansen, hannes, hillf.zj, hughd, kirill.shutemov, minchan,
	riel, shli, tim.c.chen, vdavydov.dev, mm-commits, linux-mm

On Wed 18-01-17 12:23:54, Andrew Morton wrote:
> On Wed, 18 Jan 2017 09:37:31 +0100 Michal Hocko <mhocko@kernel.org> wrote:
> 
> > On Tue 17-01-17 15:45:39, Andrew Morton wrote:
> > [...]
> > > From: "Huang\, Ying" <ying.huang@intel.com>
> > > Subject: mm-swap-add-cluster-lock-v5
> > 
> > I assume you are going to fold this into the original patch. Do you
> > think it would make sense to have it in a separate patch along with
> > the reasoning provided via email?
> 
> It should be OK - the v5 changelog (which I shall use for the folded
> patch, as usual) has
> 
> : Compared with a previous implementation using bit_spin_lock, the
> : sequential swap out throughput improved about 3.2%.  Test was done on a
> : Xeon E5 v3 system.  The swap device used is a RAM simulated PMEM
> : (persistent memory) device.  To test the sequential swapping out, the test
> : case created 32 processes, which sequentially allocate and write to the
> : anonymous pages until the RAM and part of the swap device is used.

But there are more reasons than the throughput improvement. I would
consider the full lockdep support and fairness more important. The
drawback is the memory footprint, which should be mentioned as well.
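
To put a very rough number on the footprint side, a userspace
back-of-envelope sketch (the per-cluster sizes are placeholders, not
measured kernel values; 4K pages and 256-page clusters are assumed):

/* Back-of-envelope only; sizes below are illustrative placeholders. */
#include <stdio.h>

int main(void)
{
	const unsigned long long page_size = 4096;		/* assumed 4K pages */
	const unsigned long long cluster_pages = 256;		/* assumed SWAPFILE_CLUSTER */
	const unsigned long long swap_bytes = 32ULL << 30;	/* hypothetical 32 GiB swap */

	/* Placeholder per-cluster sizes: one word before the patch, a 4-byte
	 * spinlock_t plus the 32-bit bitfields after; spinlock debugging or
	 * lockdep would inflate the latter considerably. */
	const unsigned long long before = sizeof(unsigned long);
	const unsigned long long after = 8;

	unsigned long long clusters = swap_bytes / (page_size * cluster_pages);
	printf("%llu clusters: ~%llu KiB before, ~%llu KiB after\n",
	       clusters, clusters * before >> 10, clusters * after >> 10);
	return 0;
}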

That being said, I will not insist; I just thought this would be a
nice incremental change, easier to understand later than having to
search the archives...

So take all this as my 2c...

-- 
Michal Hocko
SUSE Labs

