[PATCH] x86: don't use destroy_xen_mappings() for vunmap()
From: Jan Beulich @ 2013-07-17  7:17 UTC
  To: xen-devel; +Cc: Keir Fraser


Its attempt to tear down intermediate page table levels may race with
map_pages_to_xen() establishing them, and now that
map_domain_page_global() is backed by vmap() this teardown is also
wasteful: the same address space is very likely to need populating
again in the foreseeable future.

According to the latest staging tester logs, the race between vmap()
and vunmap() doesn't appear to be the only one left, so the patch
also adds logging of vmap() and vunmap() uses (there shouldn't be too
many of them, so the logs shouldn't get flooded). These messages are
meant to be removed (and are made to stand out clearly) as soon as
we're certain that no issue remains.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/common/vmap.c
+++ b/xen/common/vmap.c
@@ -177,6 +177,7 @@ void *__vmap(const unsigned long *mfn, u
     void *va = vm_alloc(nr * granularity, align);
     unsigned long cur = (unsigned long)va;
 
+printk("vmap(%p:%#x)\n", va, nr * granularity);//temp
     for ( ; va && nr--; ++mfn, cur += PAGE_SIZE * granularity )
     {
         if ( map_pages_to_xen(cur, *mfn, granularity, flags) )
@@ -196,9 +197,14 @@ void *vmap(const unsigned long *mfn, uns
 
 void vunmap(const void *va)
 {
+#ifndef _PAGE_NONE
     unsigned long addr = (unsigned long)va;
 
     destroy_xen_mappings(addr, addr + PAGE_SIZE * vm_size(va));
+#else /* Avoid tearing down intermediate page tables. */
+printk("vunmap(%p:%#x)\n", va, vm_size(va));//temp
+    map_pages_to_xen((unsigned long)va, 0, vm_size(va), _PAGE_NONE);
+#endif
     vm_free(va);
 }
 #endif
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -288,6 +288,7 @@ extern l1_pgentry_t l1_identmap[L1_PAGET
 void paging_init(void);
 #endif /* !defined(__ASSEMBLY__) */
 
+#define _PAGE_NONE     _AC(0x000,U)
 #define _PAGE_PRESENT  _AC(0x001,U)
 #define _PAGE_RW       _AC(0x002,U)
 #define _PAGE_USER     _AC(0x004,U)
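
[ Illustrative sketch, not part of the patch: the toy l1_table type
  and the teardown_*() helpers below are invented for illustration.
  The model contrasts the two teardown styles: freeing the
  intermediate (L1) table outright, as destroy_xen_mappings() may do,
  versus zapping its entries to non-present, as the _PAGE_NONE
  remapping above does, which leaves the table in place for a racing
  or later map_pages_to_xen(). ]

#include <stdio.h>
#include <stdlib.h>

#define L1_ENTRIES   4
#define PAGE_PRESENT 0x1UL

/* Toy stand-in for an intermediate (L1) page table. */
typedef struct { unsigned long pte[L1_ENTRIES]; } l1_table;

static l1_table *l2_slot; /* the L2 entry pointing at the L1 table */

/* destroy_xen_mappings() style: the L1 table itself goes away.  A
 * racing mapper that sampled l2_slot just before the free would now
 * write into freed memory. */
static void teardown_destroy(void)
{
    free(l2_slot);
    l2_slot = NULL;
}

/* _PAGE_NONE style: only the leaf entries are zapped to non-present
 * (mfn 0, no flags); the L1 table survives and can simply be
 * repopulated by the next map_pages_to_xen(). */
static void teardown_page_none(void)
{
    for (int i = 0; i < L1_ENTRIES; i++)
        l2_slot->pte[i] = 0;
}

int main(void)
{
    l2_slot = calloc(1, sizeof(*l2_slot));
    if (!l2_slot)
        return 1;
    l2_slot->pte[0] = 0x1000 | PAGE_PRESENT; /* one live mapping */

    teardown_page_none();
    printf("L1 table kept at %p, pte[0] = %#lx (non-present)\n",
           (void *)l2_slot, l2_slot->pte[0]);

    teardown_destroy(); /* now the intermediate level is gone too */
    return 0;
}

[ In the hypervisor itself no new helpers are needed: vunmap() simply
  remaps the range with _PAGE_NONE, so only the leaf (L1) entries
  change and every intermediate level stays allocated for re-use. ]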

Re: [PATCH] x86: don't use destroy_xen_mappings() for vunmap()
From: Keir Fraser @ 2013-07-17  7:48 UTC
  To: Jan Beulich, xen-devel

On 17/07/2013 08:17, "Jan Beulich" <JBeulich@suse.com> wrote:

> Its attempt to tear down intermediate page table levels may race with
> map_pages_to_xen() establishing them, and now that
> map_domain_page_global() is backed by vmap() this teardown is also
> wasteful: the same address space is very likely to need populating
> again in the foreseeable future.
> 
> According to the latest staging tester logs, the race between vmap()
> and vunmap() doesn't appear to be the only one left, so the patch
> also adds logging of vmap() and vunmap() uses (there shouldn't be too
> many of them, so the logs shouldn't get flooded). These messages are
> meant to be removed (and are made to stand out clearly) as soon as
> we're certain that no issue remains.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Keir Fraser <keir@xen.org>

