All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] zsmalloc: use class->pages_per_zspage
@ 2015-07-15 23:42 ` Minchan Kim
  0 siblings, 0 replies; 8+ messages in thread
From: Minchan Kim @ 2015-07-15 23:42 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Sergey Senozhatsky, linux-kernel, linux-mm, Minchan Kim

There is no need to recalculate pages_per_zspage at runtime.
Just use class->pages_per_zspage to avoid unnecessary runtime
overhead.

Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 mm/zsmalloc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 27b9661c8fa6..154a30e9c8a8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1711,7 +1711,7 @@ static unsigned long zs_can_compact(struct size_class *class)
 	obj_wasted /= get_maxobj_per_zspage(class->size,
 			class->pages_per_zspage);
 
-	return obj_wasted * get_pages_per_zspage(class->size);
+	return obj_wasted * class->pages_per_zspage;
 }
 
 static void __zs_compact(struct zs_pool *pool, struct size_class *class)
-- 
1.9.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH] zsmalloc: use class->pages_per_zspage
@ 2015-07-15 23:42 ` Minchan Kim
  0 siblings, 0 replies; 8+ messages in thread
From: Minchan Kim @ 2015-07-15 23:42 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Sergey Senozhatsky, linux-kernel, linux-mm, Minchan Kim

There is no need to recalculate pages_per_zspage at runtime.
Just use class->pages_per_zspage to avoid unnecessary runtime
overhead.

Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 mm/zsmalloc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 27b9661c8fa6..154a30e9c8a8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1711,7 +1711,7 @@ static unsigned long zs_can_compact(struct size_class *class)
 	obj_wasted /= get_maxobj_per_zspage(class->size,
 			class->pages_per_zspage);
 
-	return obj_wasted * get_pages_per_zspage(class->size);
+	return obj_wasted * class->pages_per_zspage;
 }
 
 static void __zs_compact(struct zs_pool *pool, struct size_class *class)
-- 
1.9.1

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH] zsmalloc: use class->pages_per_zspage
  2015-07-15 23:42 ` Minchan Kim
@ 2015-07-16  0:02   ` Sergey Senozhatsky
  -1 siblings, 0 replies; 8+ messages in thread
From: Sergey Senozhatsky @ 2015-07-16  0:02 UTC (permalink / raw)
  To: Minchan Kim; +Cc: Andrew Morton, Sergey Senozhatsky, linux-kernel, linux-mm

On (07/16/15 08:42), Minchan Kim wrote:
> There is no need to recalculate pages_per_zspage at runtime.
> Just use class->pages_per_zspage to avoid unnecessary runtime
> overhead.
> 
> Signed-off-by: Minchan Kim <minchan@kernel.org>
> ---
>  mm/zsmalloc.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 27b9661c8fa6..154a30e9c8a8 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -1711,7 +1711,7 @@ static unsigned long zs_can_compact(struct size_class *class)
>  	obj_wasted /= get_maxobj_per_zspage(class->size,
>  			class->pages_per_zspage);
>  
> -	return obj_wasted * get_pages_per_zspage(class->size);
> +	return obj_wasted * class->pages_per_zspage;
>  }

plus __zs_compact():

@@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 
                putback_zspage(pool, class, dst_page);
                if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
-                       pool->stats.pages_compacted +=
-                               get_pages_per_zspage(class->size);
+                       pool->stats.pages_compacted += class->pages_per_zspage;
                spin_unlock(&class->lock);
                cond_resched();
                spin_lock(&class->lock);

	-ss

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] zsmalloc: use class->pages_per_zspage
@ 2015-07-16  0:02   ` Sergey Senozhatsky
  0 siblings, 0 replies; 8+ messages in thread
From: Sergey Senozhatsky @ 2015-07-16  0:02 UTC (permalink / raw)
  To: Minchan Kim; +Cc: Andrew Morton, Sergey Senozhatsky, linux-kernel, linux-mm

On (07/16/15 08:42), Minchan Kim wrote:
> There is no need to recalculate pages_per_zspage at runtime.
> Just use class->pages_per_zspage to avoid unnecessary runtime
> overhead.
> 
> Signed-off-by: Minchan Kim <minchan@kernel.org>
> ---
>  mm/zsmalloc.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 27b9661c8fa6..154a30e9c8a8 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -1711,7 +1711,7 @@ static unsigned long zs_can_compact(struct size_class *class)
>  	obj_wasted /= get_maxobj_per_zspage(class->size,
>  			class->pages_per_zspage);
>  
> -	return obj_wasted * get_pages_per_zspage(class->size);
> +	return obj_wasted * class->pages_per_zspage;
>  }

plus __zs_compact():

@@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 
                putback_zspage(pool, class, dst_page);
                if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
-                       pool->stats.pages_compacted +=
-                               get_pages_per_zspage(class->size);
+                       pool->stats.pages_compacted += class->pages_per_zspage;
                spin_unlock(&class->lock);
                cond_resched();
                spin_lock(&class->lock);

	-ss

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] zsmalloc: use class->pages_per_zspage
  2015-07-16  0:02   ` Sergey Senozhatsky
@ 2015-07-16  0:11     ` Minchan Kim
  -1 siblings, 0 replies; 8+ messages in thread
From: Minchan Kim @ 2015-07-16  0:11 UTC (permalink / raw)
  To: Sergey Senozhatsky
  Cc: Andrew Morton, Sergey Senozhatsky, linux-kernel, linux-mm

On Thu, Jul 16, 2015 at 09:02:25AM +0900, Sergey Senozhatsky wrote:
> On (07/16/15 08:42), Minchan Kim wrote:
> > There is no need to recalculate pages_per_zspage at runtime.
> > Just use class->pages_per_zspage to avoid unnecessary runtime
> > overhead.
> > 
> > Signed-off-by: Minchan Kim <minchan@kernel.org>
> > ---
> >  mm/zsmalloc.c | 2 +-
> >  1 file changed, 1 insertion(+), 1 deletion(-)
> > 
> > diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> > index 27b9661c8fa6..154a30e9c8a8 100644
> > --- a/mm/zsmalloc.c
> > +++ b/mm/zsmalloc.c
> > @@ -1711,7 +1711,7 @@ static unsigned long zs_can_compact(struct size_class *class)
> >  	obj_wasted /= get_maxobj_per_zspage(class->size,
> >  			class->pages_per_zspage);
> >  
> > -	return obj_wasted * get_pages_per_zspage(class->size);
> > +	return obj_wasted * class->pages_per_zspage;
> >  }
> 
> plus __zs_compact():
> 
> @@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
>  
>                 putback_zspage(pool, class, dst_page);
>                 if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
> -                       pool->stats.pages_compacted +=
> -                               get_pages_per_zspage(class->size);
> +                       pool->stats.pages_compacted += class->pages_per_zspage;
>                 spin_unlock(&class->lock);
>                 cond_resched();
>                 spin_lock(&class->lock);
> 
> 	-ss

Thanks. I sent a v2.


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] zsmalloc: use class->pages_per_zspage
@ 2015-07-16  0:11     ` Minchan Kim
  0 siblings, 0 replies; 8+ messages in thread
From: Minchan Kim @ 2015-07-16  0:11 UTC (permalink / raw)
  To: Sergey Senozhatsky
  Cc: Andrew Morton, Sergey Senozhatsky, linux-kernel, linux-mm

On Thu, Jul 16, 2015 at 09:02:25AM +0900, Sergey Senozhatsky wrote:
> On (07/16/15 08:42), Minchan Kim wrote:
> > There is no need to recalculate pages_per_zspage at runtime.
> > Just use class->pages_per_zspage to avoid unnecessary runtime
> > overhead.
> > 
> > Signed-off-by: Minchan Kim <minchan@kernel.org>
> > ---
> >  mm/zsmalloc.c | 2 +-
> >  1 file changed, 1 insertion(+), 1 deletion(-)
> > 
> > diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> > index 27b9661c8fa6..154a30e9c8a8 100644
> > --- a/mm/zsmalloc.c
> > +++ b/mm/zsmalloc.c
> > @@ -1711,7 +1711,7 @@ static unsigned long zs_can_compact(struct size_class *class)
> >  	obj_wasted /= get_maxobj_per_zspage(class->size,
> >  			class->pages_per_zspage);
> >  
> > -	return obj_wasted * get_pages_per_zspage(class->size);
> > +	return obj_wasted * class->pages_per_zspage;
> >  }
> 
> plus __zs_compact():
> 
> @@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
>  
>                 putback_zspage(pool, class, dst_page);
>                 if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
> -                       pool->stats.pages_compacted +=
> -                               get_pages_per_zspage(class->size);
> +                       pool->stats.pages_compacted += class->pages_per_zspage;
>                 spin_unlock(&class->lock);
>                 cond_resched();
>                 spin_lock(&class->lock);
> 
> 	-ss

Thanks. I sent a v2.

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] zsmalloc: use class->pages_per_zspage
  2015-07-15 23:42 ` Minchan Kim
@ 2015-07-16  0:13   ` Sergey Senozhatsky
  -1 siblings, 0 replies; 8+ messages in thread
From: Sergey Senozhatsky @ 2015-07-16  0:13 UTC (permalink / raw)
  To: Minchan Kim; +Cc: Andrew Morton, Sergey Senozhatsky, linux-kernel, linux-mm

On (07/16/15 08:42), Minchan Kim wrote:
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 27b9661c8fa6..154a30e9c8a8 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -1711,7 +1711,7 @@ static unsigned long zs_can_compact(struct size_class *class)
>  	obj_wasted /= get_maxobj_per_zspage(class->size,
>  			class->pages_per_zspage);
>  
> -	return obj_wasted * get_pages_per_zspage(class->size);
> +	return obj_wasted * class->pages_per_zspage;
>  }
>  
>  static void __zs_compact(struct zs_pool *pool, struct size_class *class)

[resending, not sure that my previous reply was delivered. connectivity
problems on my side]


plus __zs_compact():

@@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 
                putback_zspage(pool, class, dst_page);
                if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
-                       pool->stats.pages_compacted +=
-                               get_pages_per_zspage(class->size);
+                       pool->stats.pages_compacted += class->pages_per_zspage;
                spin_unlock(&class->lock);
                cond_resched();
                spin_lock(&class->lock);

	-ss

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] zsmalloc: use class->pages_per_zspage
@ 2015-07-16  0:13   ` Sergey Senozhatsky
  0 siblings, 0 replies; 8+ messages in thread
From: Sergey Senozhatsky @ 2015-07-16  0:13 UTC (permalink / raw)
  To: Minchan Kim; +Cc: Andrew Morton, Sergey Senozhatsky, linux-kernel, linux-mm

On (07/16/15 08:42), Minchan Kim wrote:
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 27b9661c8fa6..154a30e9c8a8 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -1711,7 +1711,7 @@ static unsigned long zs_can_compact(struct size_class *class)
>  	obj_wasted /= get_maxobj_per_zspage(class->size,
>  			class->pages_per_zspage);
>  
> -	return obj_wasted * get_pages_per_zspage(class->size);
> +	return obj_wasted * class->pages_per_zspage;
>  }
>  
>  static void __zs_compact(struct zs_pool *pool, struct size_class *class)

[resending, not sure that my previous reply was delivered. connectivity
problems on my side]


plus __zs_compact():

@@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 
                putback_zspage(pool, class, dst_page);
                if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
-                       pool->stats.pages_compacted +=
-                               get_pages_per_zspage(class->size);
+                       pool->stats.pages_compacted += class->pages_per_zspage;
                spin_unlock(&class->lock);
                cond_resched();
                spin_lock(&class->lock);

	-ss

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2015-07-16  0:12 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-07-15 23:42 [PATCH] zsmalloc: use class->pages_per_zspage Minchan Kim
2015-07-15 23:42 ` Minchan Kim
2015-07-16  0:02 ` Sergey Senozhatsky
2015-07-16  0:02   ` Sergey Senozhatsky
2015-07-16  0:11   ` Minchan Kim
2015-07-16  0:11     ` Minchan Kim
2015-07-16  0:13 ` Sergey Senozhatsky
2015-07-16  0:13   ` Sergey Senozhatsky

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.