* [PATCH] use clear_domain_page() instead of open coding it
@ 2015-10-19 14:51 Jan Beulich
  2015-10-20  9:53 ` Andrew Cooper
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Jan Beulich @ 2015-10-19 14:51 UTC (permalink / raw)
  To: xen-devel
  Cc: Kevin Tian, Keir Fraser, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Ian Campbell, Jun Nakajima

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -552,8 +552,7 @@ void stdvga_init(struct domain *d)
 {
     struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
     struct page_info *pg;
-    void *p;
-    int i;
+    unsigned int i;
 
     memset(s, 0, sizeof(*s));
     spin_lock_init(&s->lock);
@@ -564,9 +563,7 @@ void stdvga_init(struct domain *d)
         if ( pg == NULL )
             break;
         s->vram_page[i] = pg;
-        p = __map_domain_page(pg);
-        clear_page(p);
-        unmap_domain_page(p);
+        clear_domain_page(_mfn(page_to_mfn(pg)));
     }
 
     if ( i == ARRAY_SIZE(s->vram_page) )
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -68,7 +68,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     if ( cpu_has_vmx_vmcs_shadowing )
     {
         struct page_info *vmread_bitmap, *vmwrite_bitmap;
-        unsigned long *vr, *vw;
+        unsigned long *vw;
 
         vmread_bitmap = alloc_domheap_page(NULL, 0);
         if ( !vmread_bitmap )
@@ -78,6 +78,8 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         }
         v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
 
+        clear_domain_page(_mfn(page_to_mfn(vmread_bitmap)));
+
         vmwrite_bitmap = alloc_domheap_page(NULL, 0);
         if ( !vmwrite_bitmap )
         {
@@ -86,10 +88,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         }
         v->arch.hvm_vmx.vmwrite_bitmap = vmwrite_bitmap;
 
-        vr = __map_domain_page(vmread_bitmap);
         vw = __map_domain_page(vmwrite_bitmap);
-
-        clear_page(vr);
         clear_page(vw);
 
         /*
@@ -101,7 +100,6 @@ int nvmx_vcpu_initialise(struct vcpu *v)
         set_bit(IO_BITMAP_B, vw);
         set_bit(VMCS_HIGH(IO_BITMAP_B), vw);
 
-        unmap_domain_page(vr);
         unmap_domain_page(vw);
     }
 
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1907,7 +1907,7 @@ p2m_flush_table(struct p2m_domain *p2m)
 {
     struct page_info *top, *pg;
     struct domain *d = p2m->domain;
-    void *p;
+    mfn_t mfn;
 
     p2m_lock(p2m);
 
@@ -1928,15 +1928,14 @@ p2m_flush_table(struct p2m_domain *p2m)
     p2m->np2m_base = P2M_BASE_EADDR;
     
     /* Zap the top level of the trie */
-    top = mfn_to_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
-    p = __map_domain_page(top);
-    clear_page(p);
-    unmap_domain_page(p);
+    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
+    clear_domain_page(mfn);
 
     /* Make sure nobody else is using this p2m table */
     nestedhvm_vmcx_flushtlb(p2m);
 
     /* Free the rest of the trie pages back to the paging pool */
+    top = mfn_to_page(mfn);
     while ( (pg = page_list_remove_head(&p2m->pages)) )
         if ( pg != top ) 
             d->arch.paging.free_page(d, pg);
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -78,12 +78,10 @@ static mfn_t paging_new_log_dirty_page(s
 static mfn_t paging_new_log_dirty_leaf(struct domain *d)
 {
     mfn_t mfn = paging_new_log_dirty_page(d);
+
     if ( mfn_valid(mfn) )
-    {
-        void *leaf = map_domain_page(mfn);
-        clear_page(leaf);
-        unmap_domain_page(leaf);
-    }
+        clear_domain_page(mfn);
+
     return mfn;
 }
 
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1437,8 +1437,7 @@ mfn_t shadow_alloc(struct domain *d,
     unsigned int pages = shadow_size(shadow_type);
     struct page_list_head tmp_list;
     cpumask_t mask;
-    void *p;
-    int i;
+    unsigned int i;
 
     ASSERT(paging_locked_by_me(d));
     ASSERT(shadow_type != SH_type_none);
@@ -1484,10 +1483,7 @@ mfn_t shadow_alloc(struct domain *d,
             flush_tlb_mask(&mask);
         }
         /* Now safe to clear the page for reuse */
-        p = __map_domain_page(sp);
-        ASSERT(p != NULL);
-        clear_page(p);
-        unmap_domain_page(p);
+        clear_domain_page(page_to_mfn(sp));
         INIT_PAGE_LIST_ENTRY(&sp->list);
         page_list_add(sp, &tmp_list);
         sp->u.sh.type = shadow_type;
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1959,22 +1959,16 @@ __initcall(pagealloc_keyhandler_init);
 
 void scrub_one_page(struct page_info *pg)
 {
-    void *p;
-
     if ( unlikely(pg->count_info & PGC_broken) )
         return;
 
-    p = __map_domain_page(pg);
-
 #ifndef NDEBUG
     /* Avoid callers relying on allocations returning zeroed pages. */
-    memset(p, 0xc2, PAGE_SIZE);
+    unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));
 #else
     /* For a production build, clear_page() is the fastest way to scrub. */
-    clear_page(p);
+    clear_domain_page(_mfn(page_to_mfn(pg)));
 #endif
-
-    unmap_domain_page(p);
 }
 
 static void dump_heap(unsigned char key)
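
For context: clear_domain_page() packages exactly the map/clear/unmap
sequence deleted in each hunk above, taking a typesafe mfn_t (hence the
_mfn() wrapping of the raw page_to_mfn() value in most callers). A
minimal sketch of the helper's presumed shape, assuming the
map_domain_page() API visible in the diff (the in-tree implementation
may differ in detail):

    /* Sketch only: zero the domain page identified by a typesafe MFN. */
    void clear_domain_page(mfn_t mfn)
    {
        void *p = map_domain_page(mfn);  /* temporary hypervisor mapping */

        clear_page(p);                   /* zero one full page */
        unmap_domain_page(p);            /* tear the mapping down again */
    }

The shadow_alloc() hunk can pass page_to_mfn(sp) without the _mfn()
wrapper presumably because the shadow code substitutes its own typesafe,
mfn_t-returning variant of page_to_mfn().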




* Re: [PATCH] use clear_domain_page() instead of open coding it
  2015-10-19 14:51 [PATCH] use clear_domain_page() instead of open coding it Jan Beulich
@ 2015-10-20  9:53 ` Andrew Cooper
  2015-10-20  9:53 ` George Dunlap
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 8+ messages in thread
From: Andrew Cooper @ 2015-10-20  9:53 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Kevin Tian, Keir Fraser, George Dunlap, Tim Deegan, Ian Jackson,
	Ian Campbell, Jun Nakajima

On 19/10/15 15:51, Jan Beulich wrote:
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH] use clear_domain_page() instead of open coding it
  2015-10-19 14:51 [PATCH] use clear_domain_page() instead of open coding it Jan Beulich
  2015-10-20  9:53 ` Andrew Cooper
@ 2015-10-20  9:53 ` George Dunlap
  2015-10-26 13:06 ` Ping: " Jan Beulich
  2015-10-29  1:13 ` Tian, Kevin
  3 siblings, 0 replies; 8+ messages in thread
From: George Dunlap @ 2015-10-20  9:53 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Kevin Tian, Keir Fraser, George Dunlap, Andrew Cooper,
	Ian Jackson, Tim Deegan, Ian Campbell, Jun Nakajima

On 19/10/15 15:51, Jan Beulich wrote:
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

mm bits:

Acked-by: George Dunlap <george.dunlap@citrix.com>


* Ping: [PATCH] use clear_domain_page() instead of open coding it
  2015-10-19 14:51 [PATCH] use clear_domain_page() instead of open coding it Jan Beulich
  2015-10-20  9:53 ` Andrew Cooper
  2015-10-20  9:53 ` George Dunlap
@ 2015-10-26 13:06 ` Jan Beulich
  2015-10-26 15:47   ` Ian Jackson
  2015-11-02 13:50   ` Ian Campbell
  2015-10-29  1:13 ` Tian, Kevin
  3 siblings, 2 replies; 8+ messages in thread
From: Jan Beulich @ 2015-10-26 13:06 UTC (permalink / raw)
  To: Ian Campbell, Ian Jackson, Keir Fraser, Tim Deegan
  Cc: George Dunlap, Andrew Cooper, Kevin Tian, Jun Nakajima, xen-devel

>>> On 19.10.15 at 16:51, <JBeulich@suse.com> wrote:

"REST" maintainers, could I please get an ack or otherwise on this
common code change?

Thanks, Jan

> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -1959,22 +1959,16 @@ __initcall(pagealloc_keyhandler_init);
>  
>  void scrub_one_page(struct page_info *pg)
>  {
> -    void *p;
> -
>      if ( unlikely(pg->count_info & PGC_broken) )
>          return;
>  
> -    p = __map_domain_page(pg);
> -
>  #ifndef NDEBUG
>      /* Avoid callers relying on allocations returning zeroed pages. */
> -    memset(p, 0xc2, PAGE_SIZE);
> +    unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));
>  #else
>      /* For a production build, clear_page() is the fastest way to scrub. */
> -    clear_page(p);
> +    clear_domain_page(_mfn(page_to_mfn(pg)));
>  #endif
> -
> -    unmap_domain_page(p);
>  }
>  
>  static void dump_heap(unsigned char key)


* Re: Ping: [PATCH] use clear_domain_page() instead of open coding it
  2015-10-26 13:06 ` Ping: " Jan Beulich
@ 2015-10-26 15:47   ` Ian Jackson
  2015-10-26 15:59     ` Jan Beulich
  2015-11-02 13:50   ` Ian Campbell
  1 sibling, 1 reply; 8+ messages in thread
From: Ian Jackson @ 2015-10-26 15:47 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Kevin Tian, Keir Fraser, George Dunlap, Andrew Cooper,
	Tim Deegan, Ian Campbell, Jun Nakajima, xen-devel

Jan Beulich writes ("Ping: [PATCH] use clear_domain_page() instead of open coding it"):
> "REST" maintainers, could I please get an ack or otherwise on this
> common code change?

I'm not qualified to review this, but I would have thought an ack from
George should be sufficient?

(Also, is your patch "no functional change"?  If so it helps IMO to
say so.)

Thanks,
Ian.


* Re: Ping: [PATCH] use clear_domain_page() instead of open coding it
  2015-10-26 15:47   ` Ian Jackson
@ 2015-10-26 15:59     ` Jan Beulich
  0 siblings, 0 replies; 8+ messages in thread
From: Jan Beulich @ 2015-10-26 15:59 UTC (permalink / raw)
  To: Ian Jackson
  Cc: Kevin Tian, Keir Fraser, George Dunlap, Andrew Cooper,
	Tim Deegan, Ian Campbell, Jun Nakajima, xen-devel

>>> On 26.10.15 at 16:47, <Ian.Jackson@eu.citrix.com> wrote:
> Jan Beulich writes ("Ping: [PATCH] use clear_domain_page() instead of open coding it"):
>> "REST" maintainers, could I please get an ack or otherwise on this
>> common code change?
> 
> I'm not qualified to review this, but I would have thought an ack from
> George should be sufficient ?

Not according to ./MAINTAINERS.

> (Also is your patch "no functional change" ?  If so it helps IMO to
> say so.)

It is, and indeed I could have said so.

Jan


* Re: [PATCH] use clear_domain_page() instead of open coding it
  2015-10-19 14:51 [PATCH] use clear_domain_page() instead of open coding it Jan Beulich
                   ` (2 preceding siblings ...)
  2015-10-26 13:06 ` Ping: " Jan Beulich
@ 2015-10-29  1:13 ` Tian, Kevin
  3 siblings, 0 replies; 8+ messages in thread
From: Tian, Kevin @ 2015-10-29  1:13 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Keir Fraser, George Dunlap, Andrew Cooper, Ian Jackson,
	Tim Deegan, Ian Campbell, Nakajima, Jun

> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Monday, October 19, 2015 10:52 PM
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Acked-by: Kevin Tian <kevin.tian@intel.com>


* Re: Ping: [PATCH] use clear_domain_page() instead of open coding it
  2015-10-26 13:06 ` Ping: " Jan Beulich
  2015-10-26 15:47   ` Ian Jackson
@ 2015-11-02 13:50   ` Ian Campbell
  1 sibling, 0 replies; 8+ messages in thread
From: Ian Campbell @ 2015-11-02 13:50 UTC (permalink / raw)
  To: Jan Beulich, Ian Jackson, Keir Fraser, Tim Deegan
  Cc: George Dunlap, Andrew Cooper, Kevin Tian, Jun Nakajima, xen-devel

On Mon, 2015-10-26 at 07:06 -0600, Jan Beulich wrote:
> > > > On 19.10.15 at 16:51, <JBeulich@suse.com> wrote:
> 
> "REST" maintainers, could I please get an ack or otherwise on this
> common code change?
> 
> Thanks, Jan
> 
> > --- a/xen/common/page_alloc.c
> > +++ b/xen/common/page_alloc.c
> > @@ -1959,22 +1959,16 @@ __initcall(pagealloc_keyhandler_init);
> >  
> >  void scrub_one_page(struct page_info *pg)
> >  {
> > -    void *p;
> > -
> >      if ( unlikely(pg->count_info & PGC_broken) )
> >          return;
> >  
> > -    p = __map_domain_page(pg);
> > -
> >  #ifndef NDEBUG
> >      /* Avoid callers relying on allocations returning zeroed pages. */
> > -    memset(p, 0xc2, PAGE_SIZE);
> > +    unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));

I'm not madly keen on this clever nesting of the map/memset/unmap and would
have preferred a more localised void *p (or a memset_domain_page helper
maybe; a sketch follows this message), but I don't mind enough to block
this over it:

Acked-by: Ian Campbell <ian.campbell@citrix.com>

> >  #else
> >      /* For a production build, clear_page() is the fastest way to
> > scrub. */
> > -    clear_page(p);
> > +    clear_domain_page(_mfn(page_to_mfn(pg)));
> >  #endif
> > -
> > -    unmap_domain_page(p);
> >  }
> >  
> >  static void dump_heap(unsigned char key)
> 
> 
> 
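
A memset_domain_page() helper as suggested above does not exist in the
tree at this point; a hypothetical sketch of what it might look like,
mirroring the clear_domain_page() pattern (name and signature are
assumptions, not existing Xen API):

    /* Hypothetical helper, per the suggestion above; not actual Xen API. */
    void memset_domain_page(mfn_t mfn, int c)
    {
        void *p = map_domain_page(mfn);  /* temporary hypervisor mapping */

        memset(p, c, PAGE_SIZE);         /* fill the whole page with c */
        unmap_domain_page(p);            /* drop the mapping again */
    }

With such a helper, the debug path of scrub_one_page() would collapse to
a single call, memset_domain_page(_mfn(page_to_mfn(pg)), 0xc2), avoiding
the nested map/memset/unmap expression.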

