* [PATCH] x86: saving vmcore with non-lazy freeing of vmas
@ 2010-09-16 16:44 ` Cliff Wickman
  0 siblings, 0 replies; 3+ messages in thread
From: Cliff Wickman @ 2010-09-16 16:44 UTC (permalink / raw)
  To: kexec, mingo; +Cc: linux-kernel

From: Cliff Wickman <cpw@sgi.com>

While reading /proc/vmcore the kernel calls ioremap()/iounmap()
repeatedly, and the buildup of unflushed vm_area_structs causes a
great deal of overhead (rb_next() accounts for most of that time).

The solution is to provide a function, set_iounmap_nonlazy(), which
causes the subsequent call to iounmap() to purge the vmap area
immediately (via try_purge_vmap_area_lazy()).

With this patch we have seen the time for writing a 250MB compressed dump
drop from 71 seconds to 44 seconds.
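
For reference, an abbreviated sketch of the lazy-free path in this era's
mm/vmalloc.c (an approximation, not the literal source; the
free_vmap_area_noflush() and VM_LAZY_FREE names are recalled from that
code and are not part of this patch) shows why pre-loading the counter
makes the next iounmap() purge immediately:

/* Deferred free: account the freed pages and purge only once the
 * deferred total exceeds the lazy threshold (abbreviated sketch). */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

Since set_iounmap_nonlazy() sets vmap_lazy_nr to lazy_max_pages()+1, the
very next lazy free crosses the threshold and the stale mappings are
flushed at once instead of accumulating over thousands of
ioremap()/iounmap() cycles.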

Diffed against 2.6.36-rc3

Signed-off-by: Cliff Wickman <cpw@sgi.com>

---
 arch/x86/include/asm/io.h       |    1 +
 arch/x86/kernel/crash_dump_64.c |    1 +
 mm/vmalloc.c                    |    9 +++++++++
 3 files changed, 11 insertions(+)

Index: linus.current/arch/x86/kernel/crash_dump_64.c
===================================================================
--- linus.current.orig/arch/x86/kernel/crash_dump_64.c
+++ linus.current/arch/x86/kernel/crash_dump_64.c
@@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long p
 	} else
 		memcpy(buf, vaddr + offset, csize);
 
+	set_iounmap_nonlazy();
 	iounmap(vaddr);
 	return csize;
 }
Index: linus.current/mm/vmalloc.c
===================================================================
--- linus.current.orig/mm/vmalloc.c
+++ linus.current/mm/vmalloc.c
@@ -517,6 +517,15 @@ static atomic_t vmap_lazy_nr = ATOMIC_IN
 static void purge_fragmented_blocks_allcpus(void);
 
 /*
+ * called before a call to iounmap() if the caller wants vm_area_struct's
+ * immediately freed.
+ */
+void set_iounmap_nonlazy(void)
+{
+	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
+}
+
+/*
  * Purges all lazily-freed vmap areas.
  *
  * If sync is 0 then don't purge if there is already a purge in progress.
Index: linus.current/arch/x86/include/asm/io.h
===================================================================
--- linus.current.orig/arch/x86/include/asm/io.h
+++ linus.current/arch/x86/include/asm/io.h
@@ -206,6 +206,7 @@ static inline void __iomem *ioremap(reso
 
 extern void iounmap(volatile void __iomem *addr);
 
+extern void set_iounmap_nonlazy(void);
 
 #ifdef __KERNEL__
 


* [tip:x86/mm] mm, x86: Saving vmcore with non-lazy freeing of vmas
  2010-09-16 16:44 ` Cliff Wickman
@ 2010-09-17  8:29 ` tip-bot for Cliff Wickman
  -1 siblings, 0 replies; 3+ messages in thread
From: tip-bot for Cliff Wickman @ 2010-09-17  8:29 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, akpm, stable, cpw, tglx, mingo

Commit-ID:  3ee48b6af49cf534ca2f481ecc484b156a41451d
Gitweb:     http://git.kernel.org/tip/3ee48b6af49cf534ca2f481ecc484b156a41451d
Author:     Cliff Wickman <cpw@sgi.com>
AuthorDate: Thu, 16 Sep 2010 11:44:02 -0500
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Fri, 17 Sep 2010 09:11:56 +0200

mm, x86: Saving vmcore with non-lazy freeing of vmas

While reading /proc/vmcore the kernel calls
ioremap()/iounmap() repeatedly, and the buildup of unflushed
vm_area_structs causes a great deal of overhead (rb_next()
accounts for most of that time).

The solution is to provide a function, set_iounmap_nonlazy(),
which causes the subsequent call to iounmap() to purge the
vmap area immediately (via try_purge_vmap_area_lazy()).

With this patch we have seen the time for writing a 250MB
compressed dump drop from 71 seconds to 44 seconds.
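
Purely to illustrate the semantics, a toy user-space model of the
accounting (illustration only, not kernel code; the names and the
8192 threshold are invented here):

#include <stdio.h>

static unsigned long lazy_nr;               /* models vmap_lazy_nr */
static const unsigned long lazy_max = 8192; /* stand-in for lazy_max_pages() */

static void toy_iounmap(unsigned long pages)
{
	lazy_nr += pages;               /* free is deferred...          */
	if (lazy_nr > lazy_max) {       /* ...until the threshold trips */
		printf("purging %lu deferred pages\n", lazy_nr);
		lazy_nr = 0;
	}
}

static void toy_set_nonlazy(void)
{
	lazy_nr = lazy_max + 1;         /* next toy_iounmap() purges at once */
}

int main(void)
{
	toy_iounmap(1);                 /* deferred, no purge yet */
	toy_set_nonlazy();
	toy_iounmap(1);                 /* purges immediately     */
	return 0;
}

The crash_dump_64.c hunk below shows the real in-kernel caller:
copy_oldmem_page() calls set_iounmap_nonlazy() right before iounmap()
for each page it copies out of the old kernel's memory.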

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: kexec@lists.infradead.org
Cc: <stable@kernel.org>
LKML-Reference: <E1OwHZ4-0005WK-Tw@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 arch/x86/include/asm/io.h       |    1 +
 arch/x86/kernel/crash_dump_64.c |    1 +
 mm/vmalloc.c                    |    9 +++++++++
 3 files changed, 11 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 30a3e97..6a45ec4 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -206,6 +206,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 
 extern void iounmap(volatile void __iomem *addr);
 
+extern void set_iounmap_nonlazy(void);
 
 #ifdef __KERNEL__
 
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index bf43188..9948288 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	} else
 		memcpy(buf, vaddr + offset, csize);
 
+	set_iounmap_nonlazy();
 	iounmap(vaddr);
 	return csize;
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6b8889d..d8087f0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -517,6 +517,15 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 static void purge_fragmented_blocks_allcpus(void);
 
 /*
+ * called before a call to iounmap() if the caller wants vm_area_struct's
+ * immediately freed.
+ */
+void set_iounmap_nonlazy(void)
+{
+	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
+}
+
+/*
  * Purges all lazily-freed vmap areas.
  *
  * If sync is 0 then don't purge if there is already a purge in progress.


