linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/2] mm/mmu_notifier: init notifier if necessary
@ 2012-08-24 14:37 Wanpeng Li
  0 siblings, 0 replies; 4+ messages in thread
From: Wanpeng Li @ 2012-08-24 14:37 UTC (permalink / raw)
  To: linux-mm
  Cc: linux-kernel, Michal Hocko, KAMEZAWA Hiroyuki, Minchan Kim,
	Andrew Morton, Gavin Shan, Wanpeng Li

From: Gavin Shan <shangw@linux.vnet.ibm.com>

While registering an MMU notifier, a new instance of MMU notifier_mm will
be allocated and later freed if the current mm_struct's MMU notifier_mm
has already been initialized. That causes some overhead. The patch tries to
eliminate that.

Signed-off-by: Gavin Shan <shangw@linux.vnet.ibm.com>
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
---
 mm/mmu_notifier.c |   22 +++++++++++-----------
 1 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 862b608..fb4067f 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -192,22 +192,23 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
-	ret = -ENOMEM;
-	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
-	if (unlikely(!mmu_notifier_mm))
-		goto out;
-
 	if (take_mmap_sem)
 		down_write(&mm->mmap_sem);
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
-		goto out_cleanup;
+		goto out;
 
 	if (!mm_has_notifiers(mm)) {
+		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
+					GFP_ATOMIC);
+		if (unlikely(!mmu_notifier_mm)) {
+			ret = -ENOMEM;
+			goto out_of_mem;
+		}
 		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
 		spin_lock_init(&mmu_notifier_mm->lock);
+
 		mm->mmu_notifier_mm = mmu_notifier_mm;
-		mmu_notifier_mm = NULL;
 	}
 	atomic_inc(&mm->mm_count);
 
@@ -223,13 +224,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
 	spin_unlock(&mm->mmu_notifier_mm->lock);
 
+out_of_mem:
 	mm_drop_all_locks(mm);
-out_cleanup:
+out:
 	if (take_mmap_sem)
 		up_write(&mm->mmap_sem);
-	/* kfree() does nothing if mmu_notifier_mm is NULL */
-	kfree(mmu_notifier_mm);
-out:
+
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
-- 
1.7.7.6


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH 1/2] mm/mmu_notifier: init notifier if necessary
       [not found]   ` <50389f4d.0793b60a.1627.7710SMTPIN_ADDED@mx.google.com>
@ 2012-08-30 19:13     ` Andrew Morton
  0 siblings, 0 replies; 4+ messages in thread
From: Andrew Morton @ 2012-08-30 19:13 UTC (permalink / raw)
  To: Gavin Shan
  Cc: Wanpeng Li, linux-mm, linux-kernel, Michal Hocko,
	KAMEZAWA Hiroyuki, Minchan Kim

On Sat, 25 Aug 2012 17:47:50 +0800
Gavin Shan <shangw@linux.vnet.ibm.com> wrote:

> >> --- a/mm/mmu_notifier.c
> >> +++ b/mm/mmu_notifier.c
> >> @@ -192,22 +192,23 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
> >>  
> >>  	BUG_ON(atomic_read(&mm->mm_users) <= 0);
> >>  
> >> -	ret = -ENOMEM;
> >> -	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
> >> -	if (unlikely(!mmu_notifier_mm))
> >> -		goto out;
> >> -
> >>  	if (take_mmap_sem)
> >>  		down_write(&mm->mmap_sem);
> >>  	ret = mm_take_all_locks(mm);
> >>  	if (unlikely(ret))
> >> -		goto out_cleanup;
> >> +		goto out;
> >>  
> >>  	if (!mm_has_notifiers(mm)) {
> >> +		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
> >> +					GFP_ATOMIC);
> >
> >Why was the code switched to the far weaker GFP_ATOMIC?  We can still
> >perform sleeping allocations inside mmap_sem.
> >
> 
> Yes, we can perform sleeping while allocating memory, but we're holding
> the "mmap_sem". GFP_KERNEL could possibly block somebody else who also waits
> on mmap_sem for a long time, even though that case should be rare :-)

GFP_ATOMIC allocations are unreliable.  If the allocation attempt fails
here, an entire kernel subsystem will have failed, quite probably
requiring a reboot.  It's a bad tradeoff.

Please fix this and retest.  With lockdep enabled, of course.

And please do not attempt to sneak changes like this into the kernel
without even mentioning them in the changelog.  If I hadn't
happened to notice this, we'd have ended up with a less reliable
kernel.

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH 1/2] mm/mmu_notifier: init notifier if necessary
  2012-08-24 14:37 Wanpeng Li
@ 2012-08-24 21:51 ` Andrew Morton
       [not found]   ` <50389f4d.0793b60a.1627.7710SMTPIN_ADDED@mx.google.com>
  0 siblings, 1 reply; 4+ messages in thread
From: Andrew Morton @ 2012-08-24 21:51 UTC (permalink / raw)
  To: Wanpeng Li
  Cc: linux-mm, linux-kernel, Michal Hocko, KAMEZAWA Hiroyuki,
	Minchan Kim, Gavin Shan

On Fri, 24 Aug 2012 22:37:55 +0800
Wanpeng Li <liwanp@linux.vnet.ibm.com> wrote:

> From: Gavin Shan <shangw@linux.vnet.ibm.com>
> 
> While registering an MMU notifier, a new instance of MMU notifier_mm will
> be allocated and later freed if the current mm_struct's MMU notifier_mm
> has already been initialized. That causes some overhead. The patch tries to
> eliminate that.
> 
> Signed-off-by: Gavin Shan <shangw@linux.vnet.ibm.com>
> Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
> ---
>  mm/mmu_notifier.c |   22 +++++++++++-----------
>  1 files changed, 11 insertions(+), 11 deletions(-)
> 
> diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
> index 862b608..fb4067f 100644
> --- a/mm/mmu_notifier.c
> +++ b/mm/mmu_notifier.c
> @@ -192,22 +192,23 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
>  
>  	BUG_ON(atomic_read(&mm->mm_users) <= 0);
>  
> -	ret = -ENOMEM;
> -	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
> -	if (unlikely(!mmu_notifier_mm))
> -		goto out;
> -
>  	if (take_mmap_sem)
>  		down_write(&mm->mmap_sem);
>  	ret = mm_take_all_locks(mm);
>  	if (unlikely(ret))
> -		goto out_cleanup;
> +		goto out;
>  
>  	if (!mm_has_notifiers(mm)) {
> +		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
> +					GFP_ATOMIC);

Why was the code switched to the far weaker GFP_ATOMIC?  We can still
perform sleeping allocations inside mmap_sem.

> +		if (unlikely(!mmu_notifier_mm)) {
> +			ret = -ENOMEM;
> +			goto out_of_mem;
> +		}
>  		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
>  		spin_lock_init(&mmu_notifier_mm->lock);
> +
>  		mm->mmu_notifier_mm = mmu_notifier_mm;
> -		mmu_notifier_mm = NULL;
>  	}
>  	atomic_inc(&mm->mm_count);
>  


^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH 1/2] mm/mmu_notifier: init notifier if necessary
@ 2012-08-24 14:37 Wanpeng Li
  2012-08-24 21:51 ` Andrew Morton
  0 siblings, 1 reply; 4+ messages in thread
From: Wanpeng Li @ 2012-08-24 14:37 UTC (permalink / raw)
  To: linux-mm
  Cc: linux-kernel, Michal Hocko, KAMEZAWA Hiroyuki, Minchan Kim,
	Andrew Morton, Gavin Shan, Wanpeng Li

From: Gavin Shan <shangw@linux.vnet.ibm.com>

While registering an MMU notifier, a new instance of MMU notifier_mm will
be allocated and later freed if the current mm_struct's MMU notifier_mm
has already been initialized. That causes some overhead. The patch tries to
eliminate that.

Signed-off-by: Gavin Shan <shangw@linux.vnet.ibm.com>
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
---
 mm/mmu_notifier.c |   22 +++++++++++-----------
 1 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 862b608..fb4067f 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -192,22 +192,23 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
-	ret = -ENOMEM;
-	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
-	if (unlikely(!mmu_notifier_mm))
-		goto out;
-
 	if (take_mmap_sem)
 		down_write(&mm->mmap_sem);
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
-		goto out_cleanup;
+		goto out;
 
 	if (!mm_has_notifiers(mm)) {
+		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
+					GFP_ATOMIC);
+		if (unlikely(!mmu_notifier_mm)) {
+			ret = -ENOMEM;
+			goto out_of_mem;
+		}
 		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
 		spin_lock_init(&mmu_notifier_mm->lock);
+
 		mm->mmu_notifier_mm = mmu_notifier_mm;
-		mmu_notifier_mm = NULL;
 	}
 	atomic_inc(&mm->mm_count);
 
@@ -223,13 +224,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
 	spin_unlock(&mm->mmu_notifier_mm->lock);
 
+out_of_mem:
 	mm_drop_all_locks(mm);
-out_cleanup:
+out:
 	if (take_mmap_sem)
 		up_write(&mm->mmap_sem);
-	/* kfree() does nothing if mmu_notifier_mm is NULL */
-	kfree(mmu_notifier_mm);
-out:
+
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
-- 
1.7.7.6


^ permalink raw reply related	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2012-08-30 19:13 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-08-24 14:37 [PATCH 1/2] mm/mmu_notifier: init notifier if necessary Wanpeng Li
2012-08-24 14:37 Wanpeng Li
2012-08-24 21:51 ` Andrew Morton
     [not found]   ` <50389f4d.0793b60a.1627.7710SMTPIN_ADDED@mx.google.com>
2012-08-30 19:13     ` Andrew Morton

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).