From: Minchan Kim <minchan@kernel.org>
To: Nhat Pham <nphamcs@gmail.com>
Cc: akpm@linux-foundation.org, hannes@cmpxchg.org,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	ngupta@vflare.org, senozhatsky@chromium.org, sjenning@redhat.com,
	ddstreet@ieee.org, vitaly.wool@konsulko.com
Subject: Re: [PATCH v5 6/6] zsmalloc: Implement writeback mechanism for zsmalloc
Date: Fri, 18 Nov 2022 13:01:22 -0800
Message-ID: <Y3fyon7v00ABtU6M@google.com>
In-Reply-To: <20221118182407.82548-7-nphamcs@gmail.com>

On Fri, Nov 18, 2022 at 10:24:07AM -0800, Nhat Pham wrote:
> This commit adds a writeback mechanism to zsmalloc, analogous to the one
> in the zbud allocator. Zsmalloc will determine the coldest (i.e., least
> recently used) zspage in the pool and attempt to write back all of its
> stored compressed objects via the pool's evict handler.
> 
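For readers who don't have the whole series paged in, my rough mental model
of the reclaim path this adds, as a simplified C-style sketch (helper names
like for_each_allocated_handle() are placeholders, locking and fullness-group
bookkeeping are omitted, and the retry-over-several-zspages logic is left
out; this is not the actual hunk):

static int zs_reclaim_page_sketch(struct zs_pool *pool)
{
	struct zspage *zspage;
	unsigned long handle;
	int ret = -EINVAL;

	if (list_empty(&pool->lru))
		return ret;

	/* The coldest zspage sits at the tail of the pool's LRU list. */
	zspage = list_last_entry(&pool->lru, struct zspage, lru);
	list_del(&zspage->lru);

	/* Tell zs_free() to defer freeing handles and the zspage itself. */
	zspage->under_reclaim = true;

	/* Write back each live object through the pool's evict handler. */
	for_each_allocated_handle(zspage, handle) {	/* placeholder */
		ret = pool->zpool_ops->evict(pool->zpool, handle);
		if (ret)
			break;
	}

	zspage->under_reclaim = false;
	/* On full success the now-empty zspage and its deferred handles go. */
	return ret;
}
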
> Signed-off-by: Nhat Pham <nphamcs@gmail.com>
> ---
>  mm/zsmalloc.c | 193 +++++++++++++++++++++++++++++++++++++++++++++++---
>  1 file changed, 182 insertions(+), 11 deletions(-)
> 
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 3ff86f57d08c..d73b9f9e9adf 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -271,12 +271,13 @@ struct zspage {
>  #ifdef CONFIG_ZPOOL
>  	/* links the zspage to the lru list in the pool */
>  	struct list_head lru;
> +	bool under_reclaim;
> +	/* list of unfreed handles whose objects have been reclaimed */
> +	unsigned long *deferred_handles;
>  #endif
> 
>  	struct zs_pool *pool;
> -#ifdef CONFIG_COMPACTION
>  	rwlock_t lock;
> -#endif
>  };
> 
>  struct mapping_area {
> @@ -297,10 +298,11 @@ static bool ZsHugePage(struct zspage *zspage)
>  	return zspage->huge;
>  }
> 
> -#ifdef CONFIG_COMPACTION
>  static void migrate_lock_init(struct zspage *zspage);
>  static void migrate_read_lock(struct zspage *zspage);
>  static void migrate_read_unlock(struct zspage *zspage);
> +
> +#ifdef CONFIG_COMPACTION
>  static void migrate_write_lock(struct zspage *zspage);
>  static void migrate_write_lock_nested(struct zspage *zspage);
>  static void migrate_write_unlock(struct zspage *zspage);
> @@ -308,9 +310,6 @@ static void kick_deferred_free(struct zs_pool *pool);
>  static void init_deferred_free(struct zs_pool *pool);
>  static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
>  #else
> -static void migrate_lock_init(struct zspage *zspage) {}
> -static void migrate_read_lock(struct zspage *zspage) {}
> -static void migrate_read_unlock(struct zspage *zspage) {}
>  static void migrate_write_lock(struct zspage *zspage) {}
>  static void migrate_write_lock_nested(struct zspage *zspage) {}
>  static void migrate_write_unlock(struct zspage *zspage) {}
> @@ -425,6 +424,27 @@ static void zs_zpool_free(void *pool, unsigned long handle)
>  	zs_free(pool, handle);
>  }
> 
> +static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries);
> +
> +static int zs_zpool_shrink(void *pool, unsigned int pages,
> +			unsigned int *reclaimed)
> +{
> +	unsigned int total = 0;
> +	int ret = -EINVAL;
> +
> +	while (total < pages) {
> +		ret = zs_reclaim_page(pool, 8);
> +		if (ret < 0)
> +			break;
> +		total++;
> +	}
> +
> +	if (reclaimed)
> +		*reclaimed = total;
> +
> +	return ret;
> +}
> +
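Just noting how this gets exercised: zpool_shrink() dispatches to this new
->shrink op, so zswap's existing shrink worker can now write back from
zsmalloc the same way it already does from zbud and z3fold. Roughly, on the
zswap side (paraphrased from memory, not part of this series):

	do {
		/* Ask the backend to write back one page's worth of objects. */
		ret = zpool_shrink(pool->zpool, 1, NULL);
		if (ret && ret != -EAGAIN)
			break;
	} while (!zswap_can_accept());
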
>  static void *zs_zpool_map(void *pool, unsigned long handle,
>  			enum zpool_mapmode mm)
>  {
> @@ -463,6 +483,7 @@ static struct zpool_driver zs_zpool_driver = {
>  	.malloc_support_movable = true,
>  	.malloc =		  zs_zpool_malloc,
>  	.free =			  zs_zpool_free,
> +	.shrink =		  zs_zpool_shrink,
>  	.map =			  zs_zpool_map,
>  	.unmap =		  zs_zpool_unmap,
>  	.total_size =		  zs_zpool_total_size,
> @@ -936,6 +957,23 @@ static int trylock_zspage(struct zspage *zspage)
>  	return 0;
>  }
> 
> +#ifdef CONFIG_ZPOOL
> +/*
> + * Free all the deferred handles whose objects are freed in zs_free.
> + */
> +static void free_handles(struct zs_pool *pool, struct zspage *zspage)
> +{
> +	unsigned long handle = (unsigned long)zspage->deferred_handles;
> +
> +	while (handle) {
> +		unsigned long nxt_handle = handle_to_obj(handle);
> +
> +		cache_free_handle(pool, handle);
> +		handle = nxt_handle;
> +	}
> +}

#else

static inline void free_handles(struct zs_pool *pool, struct zspage *zspage) {}

> +#endif
> +
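Might be worth a comment near the field declaration: deferred_handles is a
singly linked list threaded through the handle slots themselves, i.e. each
handle's slot is overwritten with the next deferred handle, which is why the
walk above chases handle_to_obj(). If I'm reading the rest of the patch
right, the zs_free() side amounts to something like this (paraphrased, not
quoted from the hunk):

	if (zspage->under_reclaim) {
		/*
		 * Reclaim still needs this handle to walk the zspage, so
		 * don't free it yet: push it onto the zspage's deferred
		 * list by storing the current head in the handle's slot.
		 */
		record_obj(handle, (unsigned long)zspage->deferred_handles);
		zspage->deferred_handles = (unsigned long *)handle;
		return;
	}
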
>  static void __free_zspage(struct zs_pool *pool, struct size_class *class,
>  				struct zspage *zspage)
>  {
> @@ -950,6 +988,11 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
>  	VM_BUG_ON(get_zspage_inuse(zspage));
>  	VM_BUG_ON(fg != ZS_EMPTY);
> 
> +#ifdef CONFIG_ZPOOL

Let's remove the ifdef machinery here: with the empty !CONFIG_ZPOOL stub
suggested above, the guard isn't needed.

> +	/* Free all deferred handles from zs_free */
> +	free_handles(pool, zspage);
> +#endif
> +
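Something like:

	/* Free all deferred handles from zs_free */
	free_handles(pool, zspage);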

Other than that, looks good to me.

Thread overview: 16+ messages
2022-11-18 18:24 [PATCH v5 0/6] Implement writeback for zsmalloc Nhat Pham
2022-11-18 18:24 ` [PATCH v5 1/6] zswap: fix writeback lock ordering " Nhat Pham
2022-11-18 18:24 ` [PATCH v5 2/6] zpool: clean out dead code Nhat Pham
2022-11-18 18:24 ` [PATCH v5 3/6] zsmalloc: Consolidate zs_pool's migrate_lock and size_class's locks Nhat Pham
2022-11-18 18:24 ` [PATCH v5 4/6] zsmalloc: Add a LRU to zs_pool to keep track of zspages in LRU order Nhat Pham
2022-11-18 19:32   ` Minchan Kim
2022-11-18 20:05     ` Johannes Weiner
2022-11-18 21:35       ` Minchan Kim
2022-11-18 23:23         ` Johannes Weiner
2022-11-18 18:24 ` [PATCH v5 5/6] zsmalloc: Add zpool_ops field to zs_pool to store evict handlers Nhat Pham
2022-11-18 19:33   ` Minchan Kim
2022-11-18 18:24 ` [PATCH v5 6/6] zsmalloc: Implement writeback mechanism for zsmalloc Nhat Pham
2022-11-18 21:01   ` Minchan Kim [this message]
2022-11-18 22:08     ` Nhat Pham
2022-11-18 22:38       ` Minchan Kim
2022-11-19  0:37         ` Nhat Pham
