* [PATCH] add locking around certain calls to map_pages_to_xen()
@ 2013-07-11 11:30 Jan Beulich
  2013-07-11 11:37 ` Andrew Cooper
  0 siblings, 1 reply; 13+ messages in thread
From: Jan Beulich @ 2013-07-11 11:30 UTC (permalink / raw)
  To: xen-devel; +Cc: Keir Fraser

While boot time calls don't need this, run time uses of the function
which may result in L2 page tables getting populated need to be
serialized, to avoid two CPUs populating the same L2 (or L3) entry and
overwriting each other's results.

This fixes what would seem to be a regression from commit b0581b92
("x86: make map_domain_page_global() a simple wrapper around vmap()"),
albeit that change only made the already existing issue more readily
visible.

The __init addition to memguard_init(), while seemingly unrelated,
helps make it obvious that this function's use of map_pages_to_xen() is
a boot time only one.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
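
For illustration, here is a condensed sketch of the race described above and
of the locking pattern the patch applies in __set_fixmap(); the interleaving
shown is a hypothetical example, not taken from an actual trace:

    /*
     * Without serialization, two CPUs entering map_pages_to_xen() can both
     * find the same L3 (or L2) slot empty, each install a fresh lower level
     * table there, and thereby overwrite each other's work:
     *
     *   CPU0                                CPU1
     *   map_pages_to_xen(va0, ...)          map_pages_to_xen(va1, ...)
     *     finds L3 entry empty                finds the same L3 entry empty
     *     installs new L2 table A
     *                                         installs new L2 table B
     *                                         (table A, and with it va0's
     *                                          new mapping, is silently lost)
     *
     * The pattern used below for __set_fixmap() closes this for run time
     * callers, while leaving boot time calls (which, as explained above,
     * don't need the serialization) unlocked:
     */
    static DEFINE_SPINLOCK(lock);

    if ( system_state > SYS_STATE_boot )
        spin_lock(&lock);
    map_pages_to_xen(fix_to_virt(idx), mfn, 1, flags);
    if ( system_state > SYS_STATE_boot )
        spin_unlock(&lock);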

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5757,13 +5757,33 @@ void destroy_xen_mappings(unsigned long 
 void __set_fixmap(
     enum fixed_addresses idx, unsigned long mfn, unsigned long flags)
 {
-    BUG_ON(idx >= __end_of_fixed_addresses);
+    /*
+     * As the fixmap area requires more than a single L2 entry, we need a lock
+     * to avoid races of map_pages_to_xen() populating those. There's no need
+     * for this at boot time, and not doing so avoids needing to disable IRQs
+     * while holding the lock.
+     */
+    static DEFINE_SPINLOCK(lock);
+
+    if ( system_state > SYS_STATE_boot )
+        spin_lock(&lock);
     map_pages_to_xen(fix_to_virt(idx), mfn, 1, flags);
+    if ( system_state > SYS_STATE_boot )
+        spin_unlock(&lock);
 }
 
 void *__init arch_vmap_virt_end(void)
 {
-    return (void *)fix_to_virt(__end_of_fixed_addresses);
+    /*
+     * If the fixmap area required more than a single L3 entry, we'd need to
+     * lock against vmap()'s calls to map_pages_to_xen(). Pre-populate the
+     * L3 entry possibly shared with vmap(), and make sure we don't share an
+     * L2 entry with it.
+     */
+    BUILD_BUG_ON(((FIXADDR_TOP - 1) ^ FIXADDR_START) >> L3_PAGETABLE_SHIFT);
+    map_pages_to_xen(fix_to_virt(__end_of_fixed_addresses), 0, 1, _PAGE_AVAIL);
+    return (void *)(fix_to_virt(__end_of_fixed_addresses) &
+                    (~0L << L2_PAGETABLE_SHIFT));
 }
 
 void __iomem *ioremap(paddr_t pa, size_t len)
@@ -6011,7 +6031,7 @@ void free_perdomain_mappings(struct doma
 
 #ifdef MEMORY_GUARD
 
-void memguard_init(void)
+void __init memguard_init(void)
 {
     unsigned long start = max_t(unsigned long, xen_phys_start, 1UL << 20);
     map_pages_to_xen(
--- a/xen/arch/x86/x86_64/mmconfig_64.c
+++ b/xen/arch/x86/x86_64/mmconfig_64.c
@@ -118,6 +118,8 @@ static void __iomem *mcfg_ioremap(const 
                                   unsigned long idx, unsigned int prot)
 {
     unsigned long virt, size;
+    int rc;
+    static DEFINE_SPINLOCK(lock);
 
     virt = PCI_MCFG_VIRT_START + (idx << mmcfg_pci_segment_shift) +
            (cfg->start_bus_number << 20);
@@ -125,10 +127,13 @@ static void __iomem *mcfg_ioremap(const 
     if (virt + size < virt || virt + size > PCI_MCFG_VIRT_END)
         return NULL;
 
-    if (map_pages_to_xen(virt,
-                         (cfg->address >> PAGE_SHIFT) +
-                         (cfg->start_bus_number << (20 - PAGE_SHIFT)),
-                         size >> PAGE_SHIFT, prot))
+    spin_lock(&lock);
+    rc = map_pages_to_xen(virt,
+                          (cfg->address >> PAGE_SHIFT) +
+                          (cfg->start_bus_number << (20 - PAGE_SHIFT)),
+                          size >> PAGE_SHIFT, prot);
+    spin_unlock(&lock);
+    if ( rc )
         return NULL;
 
     return (void __iomem *) virt;
--- a/xen/common/vmap.c
+++ b/xen/common/vmap.c
@@ -179,7 +179,12 @@ void *__vmap(const unsigned long *mfn, u
 
     for ( ; va && nr--; ++mfn, cur += PAGE_SIZE * granularity )
     {
-        if ( map_pages_to_xen(cur, *mfn, granularity, flags) )
+        int rc;
+
+        spin_lock(&vm_lock);
+        rc = map_pages_to_xen(cur, *mfn, granularity, flags);
+        spin_unlock(&vm_lock);
+        if ( rc )
         {
             vunmap(va);
             va = NULL;
@@ -198,7 +203,9 @@ void vunmap(const void *va)
 {
     unsigned long addr = (unsigned long)va;
 
+    spin_lock(&vm_lock);
     destroy_xen_mappings(addr, addr + PAGE_SIZE * vm_size(va));
+    spin_unlock(&vm_lock);
     vm_free(va);
 }
 #endif

Thread overview: 13+ messages
2013-07-11 11:30 [PATCH] add locking around certain calls to map_pages_to_xen() Jan Beulich
2013-07-11 11:37 ` Andrew Cooper
2013-07-11 11:56   ` Jan Beulich
2013-07-12  8:17   ` [PATCH v2] " Jan Beulich
2013-07-12  9:48     ` Andrew Cooper
2013-07-12 12:15     ` Keir Fraser
2013-07-12 12:44       ` Jan Beulich
2013-07-12 13:37         ` Keir Fraser
2013-07-12 13:41           ` Jan Beulich
2013-07-12 14:01             ` Keir Fraser
2013-07-12 14:30               ` Jan Beulich
2013-07-15  8:24                 ` Jan Beulich
2013-07-15  8:36                   ` Keir Fraser
