* [PATCH for-4.5 v2 0/2] Support for sparse physical address space on ARM
@ 2014-09-12 15:14 Ian Campbell
  2014-09-12 15:15 ` [PATCH for-4.5 v2 1/2] xen: refactor physical address space compression support into common code Ian Campbell
  2014-09-12 15:15 ` [PATCH for-4.5 v2 2/2] xen: arm: Enable physical address space compression (PDX) on arm32 Ian Campbell
  0 siblings, 2 replies; 8+ messages in thread
From: Ian Campbell @ 2014-09-12 15:14 UTC (permalink / raw)
  To: xen-devel
  Cc: Stefano Stabellini, Julien Grall, Tim Deegan, Roy Franz,
	Jan Beulich, Fu Wei

This series refactors the x86 "pdx" stuff (phys address space
compression) into common code and makes use of it on ARM.

Since last time I've implemented support for arm32 and fixed a bunch of
bugs, hence no longer RFC. Since this is needed to run on grub and/or
UEFI systems (which tend to have sparser address maps) I'm targeting
this at 4.5.
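
For anyone not familiar with the scheme: pdx compression squeezes a run
of physical address bits which no RAM bank ever uses out of the page
frame number space, so the frametable only needs entries for frame
numbers which can actually exist. A minimal sketch of the translation,
assuming an illustrative 3-bit hole at PFN bits 20-22 (the real masks
are computed from the boot memory map by pfn_pdx_hole_setup()):

    /* Illustrative masks only -- in reality derived at boot. */
    #define BOTTOM_MASK  ((1UL << 20) - 1)     /* bits below the hole */
    #define HOLE_SHIFT   3                     /* width of the hole   */
    #define TOP_MASK     (~((1UL << 23) - 1))  /* bits above the hole */

    static unsigned long example_pfn_to_pdx(unsigned long pfn)
    {
        /* Keep the low bits, slide the high bits down over the hole. */
        return (pfn & BOTTOM_MASK) | ((pfn & TOP_MASK) >> HOLE_SHIFT);
    }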

The code is also available from 
        git://xenbits.xen.org/people/ianc/xen.git pdx-v2

Ian.

* [PATCH for-4.5 v2 1/2] xen: refactor physical address space compression support into common code
  2014-09-12 15:14 [PATCH for-4.5 v2 0/2] Support for sparse physical address space on ARM Ian Campbell
@ 2014-09-12 15:15 ` Ian Campbell
  2014-09-12 15:32   ` Jan Beulich
  2014-09-12 15:15 ` [PATCH for-4.5 v2 2/2] xen: arm: Enable physical address space compression (PDX) on arm32 Ian Campbell
  1 sibling, 1 reply; 8+ messages in thread
From: Ian Campbell @ 2014-09-12 15:15 UTC (permalink / raw)
  To: xen-devel
  Cc: Ian Campbell, stefano.stabellini, julien.grall, tim, Roy Franz, Fu Wei

The "pdx compression" functionality will be useful on ARM as well.

Move the code to common code+header and introduce HAS_PDX to control when it is
built. L2_PAGETABLE_SHIFT is x86 specific, so introduce PDX_GROUP_SHIFT to
abstract it out.

ARM has no need for superpage compression (yet?) and lacks SUPERPAGE_SHIFT so
those functions (spage_to_mfn et al) are not moved.

No effect on x86 and no change for ARM (yet).
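
(Aside, since the definition moves in this patch: PDX_GROUP_COUNT is the
number of pdx values whose frame-table entries span a whole number of
(1 << PDX_GROUP_SHIFT)-byte mappings; the "sizeof & -sizeof" idiom
isolates the largest power of two dividing sizeof(*frame_table). E.g.
assuming a 32-byte struct page_info and the x86 shift of 21, that is
(1 << 21) / 32 = 65536 pdxes per group -- numbers illustrative only.)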

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
v2:
 - Correct closing guard comment in pdx.h
---
 xen/Rules.mk                      |    1 +
 xen/arch/x86/Rules.mk             |    1 +
 xen/arch/x86/mm.c                 |    3 --
 xen/arch/x86/setup.c              |   10 ----
 xen/arch/x86/x86_64/mm.c          |   53 --------------------
 xen/common/Makefile               |    1 +
 xen/common/pdx.c                  |   99 +++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/mm.h          |    4 +-
 xen/include/asm-x86/page.h        |    2 -
 xen/include/asm-x86/x86_64/page.h |   25 +---------
 xen/include/xen/pdx.h             |   47 ++++++++++++++++++
 11 files changed, 152 insertions(+), 94 deletions(-)
 create mode 100644 xen/common/pdx.c
 create mode 100644 xen/include/xen/pdx.h

diff --git a/xen/Rules.mk b/xen/Rules.mk
index b49f3c8..e2f9e36 100644
--- a/xen/Rules.mk
+++ b/xen/Rules.mk
@@ -59,6 +59,7 @@ CFLAGS-$(HAS_PASSTHROUGH) += -DHAS_PASSTHROUGH
 CFLAGS-$(HAS_DEVICE_TREE) += -DHAS_DEVICE_TREE
 CFLAGS-$(HAS_PCI)       += -DHAS_PCI
 CFLAGS-$(HAS_IOPORTS)   += -DHAS_IOPORTS
+CFLAGS-$(HAS_PDX)       += -DHAS_PDX
 CFLAGS-$(frame_pointer) += -fno-omit-frame-pointer -DCONFIG_FRAME_POINTER
 
 ifneq ($(max_phys_cpus),)
diff --git a/xen/arch/x86/Rules.mk b/xen/arch/x86/Rules.mk
index 576985e..6775cb5 100644
--- a/xen/arch/x86/Rules.mk
+++ b/xen/arch/x86/Rules.mk
@@ -12,6 +12,7 @@ HAS_NS16550 := y
 HAS_EHCI := y
 HAS_KEXEC := y
 HAS_GDBSX := y
+HAS_PDX := y
 xenoprof := y
 
 #
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d23cb3f..5b3f06f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -147,9 +147,6 @@ struct domain *dom_xen, *dom_io, *dom_cow;
 unsigned long max_page;
 unsigned long total_pages;
 
-unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
-    (FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = { [0] = 1 };
-
 bool_t __read_mostly machine_to_phys_mapping_valid = 0;
 
 struct rangeset *__read_mostly mmio_ro_ranges;
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 6a814cd..8c8b91f 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -392,16 +392,6 @@ static void __init setup_max_pdx(unsigned long top_page)
     max_page = pdx_to_pfn(max_pdx - 1) + 1;
 }
 
-void set_pdx_range(unsigned long smfn, unsigned long emfn)
-{
-    unsigned long idx, eidx;
-
-    idx = pfn_to_pdx(smfn) / PDX_GROUP_COUNT;
-    eidx = (pfn_to_pdx(emfn - 1) + PDX_GROUP_COUNT) / PDX_GROUP_COUNT;
-    for ( ; idx < eidx; ++idx )
-        __set_bit(idx, pdx_group_valid);
-}
-
 /* A temporary copy of the e820 map that we can mess with during bootstrap. */
 static struct e820map __initdata boot_e820;
 
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 4937f9a..09817fc 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -40,15 +40,6 @@
 #include <asm/mem_access.h>
 #include <public/memory.h>
 
-/* Parameters for PFN/MADDR compression. */
-unsigned long __read_mostly max_pdx;
-unsigned long __read_mostly pfn_pdx_bottom_mask = ~0UL;
-unsigned long __read_mostly ma_va_bottom_mask = ~0UL;
-unsigned long __read_mostly pfn_top_mask = 0;
-unsigned long __read_mostly ma_top_mask = 0;
-unsigned long __read_mostly pfn_hole_mask = 0;
-unsigned int __read_mostly pfn_pdx_hole_shift = 0;
-
 unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
 
 /* Enough page directories to map into the bottom 1GB. */
@@ -59,14 +50,6 @@ l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
 
 l2_pgentry_t *compat_idle_pg_table_l2;
 
-int __mfn_valid(unsigned long mfn)
-{
-    return likely(mfn < max_page) &&
-           likely(!(mfn & pfn_hole_mask)) &&
-           likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT,
-                           pdx_group_valid));
-}
-
 void *do_page_walk(struct vcpu *v, unsigned long addr)
 {
     unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
@@ -119,42 +102,6 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
     return map_domain_page(mfn) + (addr & ~PAGE_MASK);
 }
 
-void __init pfn_pdx_hole_setup(unsigned long mask)
-{
-    unsigned int i, j, bottom_shift = 0, hole_shift = 0;
-
-    /*
-     * We skip the first MAX_ORDER bits, as we never want to compress them.
-     * This guarantees that page-pointer arithmetic remains valid within
-     * contiguous aligned ranges of 2^MAX_ORDER pages. Among others, our
-     * buddy allocator relies on this assumption.
-     */
-    for ( j = MAX_ORDER-1; ; )
-    {
-        i = find_next_zero_bit(&mask, BITS_PER_LONG, j);
-        j = find_next_bit(&mask, BITS_PER_LONG, i);
-        if ( j >= BITS_PER_LONG )
-            break;
-        if ( j - i > hole_shift )
-        {
-            hole_shift = j - i;
-            bottom_shift = i;
-        }
-    }
-    if ( !hole_shift )
-        return;
-
-    printk(KERN_INFO "PFN compression on bits %u...%u\n",
-           bottom_shift, bottom_shift + hole_shift - 1);
-
-    pfn_pdx_hole_shift  = hole_shift;
-    pfn_pdx_bottom_mask = (1UL << bottom_shift) - 1;
-    ma_va_bottom_mask   = (PAGE_SIZE << bottom_shift) - 1;
-    pfn_hole_mask       = ((1UL << hole_shift) - 1) << bottom_shift;
-    pfn_top_mask        = ~(pfn_pdx_bottom_mask | pfn_hole_mask);
-    ma_top_mask         = pfn_top_mask << PAGE_SHIFT;
-}
-
 /*
  * Allocate page table pages for m2p table
  */
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 3683ae3..f7d10f0 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -51,6 +51,7 @@ obj-y += tmem_xen.o
 obj-y += radix-tree.o
 obj-y += rbtree.o
 obj-y += lzo.o
+obj-$(HAS_PDX) += pdx.o
 
 obj-bin-$(CONFIG_X86) += $(foreach n,decompress bunzip2 unxz unlzma unlzo unlz4 earlycpio,$(n).init.o)
 
diff --git a/xen/common/pdx.c b/xen/common/pdx.c
new file mode 100644
index 0000000..11349a7
--- /dev/null
+++ b/xen/common/pdx.c
@@ -0,0 +1,99 @@
+/******************************************************************************
+ * Original code extracted from arch/x86/x86_64/mm.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <xen/bitops.h>
+
+/* Parameters for PFN/MADDR compression. */
+unsigned long __read_mostly max_pdx;
+unsigned long __read_mostly pfn_pdx_bottom_mask = ~0UL;
+unsigned long __read_mostly ma_va_bottom_mask = ~0UL;
+unsigned long __read_mostly pfn_top_mask = 0;
+unsigned long __read_mostly ma_top_mask = 0;
+unsigned long __read_mostly pfn_hole_mask = 0;
+unsigned int __read_mostly pfn_pdx_hole_shift = 0;
+
+unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
+    (FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = { [0] = 1 };
+
+int __mfn_valid(unsigned long mfn)
+{
+    return likely(mfn < max_page) &&
+           likely(!(mfn & pfn_hole_mask)) &&
+           likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT,
+                           pdx_group_valid));
+}
+
+void set_pdx_range(unsigned long smfn, unsigned long emfn)
+{
+    unsigned long idx, eidx;
+
+    idx = pfn_to_pdx(smfn) / PDX_GROUP_COUNT;
+    eidx = (pfn_to_pdx(emfn - 1) + PDX_GROUP_COUNT) / PDX_GROUP_COUNT;
+
+    for ( ; idx < eidx; ++idx )
+        __set_bit(idx, pdx_group_valid);
+}
+
+void __init pfn_pdx_hole_setup(unsigned long mask)
+{
+    unsigned int i, j, bottom_shift = 0, hole_shift = 0;
+
+    /*
+     * We skip the first MAX_ORDER bits, as we never want to compress them.
+     * This guarantees that page-pointer arithmetic remains valid within
+     * contiguous aligned ranges of 2^MAX_ORDER pages. Among others, our
+     * buddy allocator relies on this assumption.
+     */
+    for ( j = MAX_ORDER-1; ; )
+    {
+        i = find_next_zero_bit(&mask, BITS_PER_LONG, j);
+        j = find_next_bit(&mask, BITS_PER_LONG, i);
+        if ( j >= BITS_PER_LONG )
+            break;
+        if ( j - i > hole_shift )
+        {
+            hole_shift = j - i;
+            bottom_shift = i;
+        }
+    }
+    if ( !hole_shift )
+        return;
+
+    printk(KERN_INFO "PFN compression on bits %u...%u\n",
+           bottom_shift, bottom_shift + hole_shift - 1);
+
+    pfn_pdx_hole_shift  = hole_shift;
+    pfn_pdx_bottom_mask = (1UL << bottom_shift) - 1;
+    ma_va_bottom_mask   = (PAGE_SIZE << bottom_shift) - 1;
+    pfn_hole_mask       = ((1UL << hole_shift) - 1) << bottom_shift;
+    pfn_top_mask        = ~(pfn_pdx_bottom_mask | pfn_hole_mask);
+    ma_top_mask         = pfn_top_mask << PAGE_SHIFT;
+}
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 7b85865..746bcf1 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -280,9 +280,7 @@ extern unsigned long max_page;
 extern unsigned long total_pages;
 void init_frametable(void);
 
-#define PDX_GROUP_COUNT ((1 << L2_PAGETABLE_SHIFT) / \
-                         (sizeof(*frame_table) & -sizeof(*frame_table)))
-extern unsigned long pdx_group_valid[];
+#define PDX_GROUP_SHIFT L2_PAGETABLE_SHIFT
 
 /* Convert between Xen-heap virtual addresses and page-info structures. */
 static inline struct page_info *__virt_to_page(const void *v)
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index ccc268d..9aa780e 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -334,8 +334,6 @@ void *alloc_xen_pagetable(void);
 void free_xen_pagetable(void *v);
 l1_pgentry_t *virt_to_xen_l1e(unsigned long v);
 
-extern void set_pdx_range(unsigned long smfn, unsigned long emfn);
-
 /* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
 static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
 {
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index 3eee5b5..1d54587 100644
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -35,17 +35,10 @@
 #include <xen/config.h>
 #include <asm/types.h>
 
-extern unsigned long xen_virt_end;
+#include <xen/pdx.h>
 
-extern unsigned long max_pdx;
-extern unsigned long pfn_pdx_bottom_mask, ma_va_bottom_mask;
-extern unsigned int pfn_pdx_hole_shift;
-extern unsigned long pfn_hole_mask;
-extern unsigned long pfn_top_mask, ma_top_mask;
-extern void pfn_pdx_hole_setup(unsigned long);
+extern unsigned long xen_virt_end;
 
-#define page_to_pdx(pg)  ((pg) - frame_table)
-#define pdx_to_page(pdx) (frame_table + (pdx))
 #define spage_to_pdx(spg) (((spg) - spage_table)<<(SUPERPAGE_SHIFT-PAGE_SHIFT))
 #define pdx_to_spage(pdx) (spage_table + ((pdx)>>(SUPERPAGE_SHIFT-PAGE_SHIFT)))
 /*
@@ -57,20 +50,6 @@ extern void pfn_pdx_hole_setup(unsigned long);
 #define pdx_to_virt(pdx) ((void *)(DIRECTMAP_VIRT_START + \
                                    ((unsigned long)(pdx) << PAGE_SHIFT)))
 
-extern int __mfn_valid(unsigned long mfn);
-
-static inline unsigned long pfn_to_pdx(unsigned long pfn)
-{
-    return (pfn & pfn_pdx_bottom_mask) |
-           ((pfn & pfn_top_mask) >> pfn_pdx_hole_shift);
-}
-
-static inline unsigned long pdx_to_pfn(unsigned long pdx)
-{
-    return (pdx & pfn_pdx_bottom_mask) |
-           ((pdx << pfn_pdx_hole_shift) & pfn_top_mask);
-}
-
 static inline unsigned long pfn_to_sdx(unsigned long pfn)
 {
     return pfn_to_pdx(pfn) >> (SUPERPAGE_SHIFT-PAGE_SHIFT);
diff --git a/xen/include/xen/pdx.h b/xen/include/xen/pdx.h
new file mode 100644
index 0000000..cdab0d1
--- /dev/null
+++ b/xen/include/xen/pdx.h
@@ -0,0 +1,47 @@
+#ifndef __XEN_PDX_H__
+#define __XEN_PDX_H__
+
+#ifdef HAS_PDX
+
+extern unsigned long max_pdx;
+extern unsigned long pfn_pdx_bottom_mask, ma_va_bottom_mask;
+extern unsigned int pfn_pdx_hole_shift;
+extern unsigned long pfn_hole_mask;
+extern unsigned long pfn_top_mask, ma_top_mask;
+
+#define PDX_GROUP_COUNT ((1 << PDX_GROUP_SHIFT) / \
+                         (sizeof(*frame_table) & -sizeof(*frame_table)))
+extern unsigned long pdx_group_valid[];
+
+extern void set_pdx_range(unsigned long smfn, unsigned long emfn);
+
+#define page_to_pdx(pg)  ((pg) - frame_table)
+#define pdx_to_page(pdx) (frame_table + (pdx))
+
+extern int __mfn_valid(unsigned long mfn);
+
+static inline unsigned long pfn_to_pdx(unsigned long pfn)
+{
+    return (pfn & pfn_pdx_bottom_mask) |
+           ((pfn & pfn_top_mask) >> pfn_pdx_hole_shift);
+}
+
+static inline unsigned long pdx_to_pfn(unsigned long pdx)
+{
+    return (pdx & pfn_pdx_bottom_mask) |
+           ((pdx << pfn_pdx_hole_shift) & pfn_top_mask);
+}
+
+extern void pfn_pdx_hole_setup(unsigned long);
+
+#endif /* HAVE_PDX */
+#endif /* __XEN_PDX_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
-- 
1.7.10.4

* [PATCH for-4.5 v2 2/2] xen: arm: Enable physical address space compression (PDX) on arm32
  2014-09-12 15:14 [PATCH for-4.5 v2 0/2] Support for sparse physical address space on ARM Ian Campbell
  2014-09-12 15:15 ` [PATCH for-4.5 v2 1/2] xen: refactor physical address space compression support into common code Ian Campbell
@ 2014-09-12 15:15 ` Ian Campbell
  2014-09-12 23:00   ` Julien Grall
  1 sibling, 1 reply; 8+ messages in thread
From: Ian Campbell @ 2014-09-12 15:15 UTC (permalink / raw)
  To: xen-devel
  Cc: Ian Campbell, stefano.stabellini, julien.grall, tim, Roy Franz, Fu Wei

This allows us to support sparse physical address maps which we previously
could not because the frametable would end up taking up an enourmous fraction
of RAM.

On a fast model which has RAM at 0x80000000-0x100000000 and
0x880000000-0x900000000 this reduces the size of the frametable from
478M to 84M.
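
For the curious, the arithmetic behind those numbers (a sketch assuming
4K pages and a 56-byte struct page_info; treat both as illustrative):

    Frame span:  (0x900000000 - 0x80000000) >> 12 = 0x880000 pages
    Before:      0x880000 * 56 bytes ~= 476M of frametable
                 (the 478M quoted above, give or take rounding)
    init_pdx():  the bank mask accumulates to 0x8FFFFFFFF, i.e. PFN
                 bits 20-22 (address bits 32-34) are never used, so
                 pfn_pdx_hole_setup() compresses those 3 bits away
    After:       pfn_to_pdx(0x880000) = 0x180000 pdx entries, and
                 0x180000 * 56 bytes = 84M of frametable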

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v2:
 - Implement support for arm32 (tested on models).
 - Simplify the arm64 stuff a bit
 - Fixed some bugs
---
 xen/arch/arm/Rules.mk        |    1 +
 xen/arch/arm/mm.c            |   18 +++---
 xen/arch/arm/setup.c         |  136 +++++++++++++++++++-----------------------
 xen/include/asm-arm/config.h |   11 +++-
 xen/include/asm-arm/mm.h     |   37 ++++++------
 xen/include/asm-arm/numa.h   |    2 +-
 6 files changed, 101 insertions(+), 104 deletions(-)

diff --git a/xen/arch/arm/Rules.mk b/xen/arch/arm/Rules.mk
index 8658176..26fafa2 100644
--- a/xen/arch/arm/Rules.mk
+++ b/xen/arch/arm/Rules.mk
@@ -10,6 +10,7 @@ HAS_DEVICE_TREE := y
 HAS_VIDEO := y
 HAS_ARM_HDLCD := y
 HAS_PASSTHROUGH := y
+HAS_PDX := y
 
 CFLAGS += -I$(BASEDIR)/include
 
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 0a243b0..7a04cc4 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -138,7 +138,7 @@ unsigned long xenheap_mfn_start __read_mostly = ~0UL;
 unsigned long xenheap_mfn_end __read_mostly;
 unsigned long xenheap_virt_end __read_mostly;
 
-unsigned long frametable_base_mfn __read_mostly;
+unsigned long frametable_base_pdx __read_mostly;
 unsigned long frametable_virt_end __read_mostly;
 
 unsigned long max_page;
@@ -665,7 +665,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
     /* Align to previous 1GB boundary */
     base_mfn &= ~((FIRST_SIZE>>PAGE_SHIFT)-1);
 
-    offset = base_mfn - xenheap_mfn_start;
+    offset = pfn_to_pdx(base_mfn - xenheap_mfn_start);
     vaddr = DIRECTMAP_VIRT_START + offset*PAGE_SIZE;
 
     while ( base_mfn < end_mfn )
@@ -716,7 +716,8 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
 void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
 {
     unsigned long nr_pages = (pe - ps) >> PAGE_SHIFT;
-    unsigned long frametable_size = nr_pages * sizeof(struct page_info);
+    unsigned long nr_pdxs = pfn_to_pdx(nr_pages);
+    unsigned long frametable_size = nr_pdxs * sizeof(struct page_info);
     unsigned long base_mfn;
 #ifdef CONFIG_ARM_64
     lpae_t *second, pte;
@@ -724,7 +725,7 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
     int i;
 #endif
 
-    frametable_base_mfn = ps >> PAGE_SHIFT;
+    frametable_base_pdx = pfn_to_pdx(ps >> PAGE_SHIFT);
 
     /* Round up to 32M boundary */
     frametable_size = (frametable_size + 0x1ffffff) & ~0x1ffffff;
@@ -745,11 +746,12 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
     create_32mb_mappings(xen_second, FRAMETABLE_VIRT_START, base_mfn, frametable_size >> PAGE_SHIFT);
 #endif
 
-    memset(&frame_table[0], 0, nr_pages * sizeof(struct page_info));
-    memset(&frame_table[nr_pages], -1,
-           frametable_size - (nr_pages * sizeof(struct page_info)));
+    memset(&frame_table[0], 0, nr_pdxs * sizeof(struct page_info));
+    memset(&frame_table[nr_pdxs], -1,
+           frametable_size - (nr_pdxs * sizeof(struct page_info)));
+
+    frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pdxs * sizeof(struct page_info));
 
-    frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pages * sizeof(struct page_info));
 }
 
 void *__init arch_vmap_virt_end(void)
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 446b4dc..7b11344 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -423,11 +423,58 @@ static paddr_t __init get_xen_paddr(void)
     return paddr;
 }
 
+/* Sets all bits from the most-significant 1-bit down to the LSB */
+static u64 __init fill_mask(u64 mask)
+{
+    while (mask & (mask + 1))
+        mask |= mask + 1;
+    return mask;
+}
+
+static void init_pdx(void)
+{
+    paddr_t bank_start, bank_size, bank_end;
+
+    u64 mask = fill_mask(bootinfo.mem.bank[0].start - 1);
+    int bank;
+
+    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
+    {
+        bank_start = bootinfo.mem.bank[bank].start;
+        bank_size = bootinfo.mem.bank[bank].size;
+        bank_end = bank_start + bank_size;
+
+        mask |= bank_start | fill_mask(bank_start ^ (bank_end - 1));
+    }
+
+    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
+    {
+        bank_start = bootinfo.mem.bank[bank].start;
+        bank_size = bootinfo.mem.bank[bank].size;
+        bank_end = bank_start + bank_size;
+
+        if (~mask &
+            fill_mask(bank_start ^ (bank_end - 1)))
+            mask = 0;
+    }
+
+    pfn_pdx_hole_setup(mask >> PAGE_SHIFT);
+
+    for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
+    {
+        bank_start = bootinfo.mem.bank[bank].start;
+        bank_size = bootinfo.mem.bank[bank].size;
+        bank_end = bank_start + bank_size;
+
+        set_pdx_range(paddr_to_pfn(bank_start),
+                      paddr_to_pfn(bank_end));
+    }
+}
+
 #ifdef CONFIG_ARM_32
 static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
 {
     paddr_t ram_start, ram_end, ram_size;
-    paddr_t contig_start, contig_end;
     paddr_t s, e;
     unsigned long ram_pages;
     unsigned long heap_pages, xenheap_pages, domheap_pages;
@@ -439,24 +486,11 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
     if ( !bootinfo.mem.nr_banks )
         panic("No memory bank");
 
-    /*
-     * We are going to accumulate two regions here.
-     *
-     * The first is the bounds of the initial memory region which is
-     * contiguous with the first bank. For simplicity the xenheap is
-     * always allocated from this region.
-     *
-     * The second is the complete bounds of the regions containing RAM
-     * (ie. from the lowest RAM address to the highest), which
-     * includes any holes.
-     *
-     * We also track the number of actual RAM pages (i.e. not counting
-     * the holes).
-     */
-    ram_size  = bootinfo.mem.bank[0].size;
+    init_pdx();
 
-    contig_start = ram_start = bootinfo.mem.bank[0].start;
-    contig_end   = ram_end = ram_start + ram_size;
+    ram_start = bootinfo.mem.bank[0].start;
+    ram_size  = bootinfo.mem.bank[0].size;
+    ram_end   = ram_start + ram_size;
 
     for ( i = 1; i < bootinfo.mem.nr_banks; i++ )
     {
@@ -464,41 +498,9 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
         paddr_t bank_size = bootinfo.mem.bank[i].size;
         paddr_t bank_end = bank_start + bank_size;
 
-        paddr_t new_ram_size = ram_size + bank_size;
-        paddr_t new_ram_start = min(ram_start,bank_start);
-        paddr_t new_ram_end = max(ram_end,bank_end);
-
-        /*
-         * If the new bank is contiguous with the initial contiguous
-         * region then incorporate it into the contiguous region.
-         *
-         * Otherwise we allow non-contigious regions so long as at
-         * least half of the total RAM region actually contains
-         * RAM. We actually fudge this slightly and require that
-         * adding the current bank does not cause us to violate this
-         * restriction.
-         *
-         * This restriction ensures that the frametable (which is not
-         * currently sparse) does not consume all available RAM.
-         */
-        if ( bank_start == contig_end )
-            contig_end = bank_end;
-        else if ( bank_end == contig_start )
-            contig_start = bank_start;
-        else if ( 2 * new_ram_size < new_ram_end - new_ram_start )
-            /* Would create memory map which is too sparse, so stop here. */
-            break;
-
-        ram_size = new_ram_size;
-        ram_start = new_ram_start;
-        ram_end = new_ram_end;
-    }
-
-    if ( i != bootinfo.mem.nr_banks )
-    {
-        printk("WARNING: only using %d out of %d memory banks\n",
-               i, bootinfo.mem.nr_banks);
-        bootinfo.mem.nr_banks = i;
+        ram_size  = ram_size + bank_size;
+        ram_start = min(ram_start,bank_start);
+        ram_end   = max(ram_end,bank_end);
     }
 
     total_pages = ram_pages = ram_size >> PAGE_SHIFT;
@@ -520,8 +522,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
 
     do
     {
-        /* xenheap is always in the initial contiguous region */
-        e = consider_modules(contig_start, contig_end,
+        e = consider_modules(ram_start, ram_end,
                              pfn_to_paddr(xenheap_pages),
                              32<<20, 0);
         if ( e )
@@ -616,6 +617,8 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
     unsigned long dtb_pages;
     void *fdt;
 
+    init_pdx();
+
     total_pages = 0;
     for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
     {
@@ -624,26 +627,9 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
         paddr_t bank_end = bank_start + bank_size;
         paddr_t s, e;
 
-        paddr_t new_ram_size = ram_size + bank_size;
-        paddr_t new_ram_start = min(ram_start,bank_start);
-        paddr_t new_ram_end = max(ram_end,bank_end);
-
-        /*
-         * We allow non-contigious regions so long as at least half of
-         * the total RAM region actually contains RAM. We actually
-         * fudge this slightly and require that adding the current
-         * bank does not cause us to violate this restriction.
-         *
-         * This restriction ensures that the frametable (which is not
-         * currently sparse) does not consume all available RAM.
-         */
-        if ( bank > 0 && 2 * new_ram_size < new_ram_end - new_ram_start )
-            /* Would create memory map which is too sparse, so stop here. */
-            break;
-
-        ram_start = new_ram_start;
-        ram_end = new_ram_end;
-        ram_size = new_ram_size;
+        ram_size = ram_size + bank_size;
+        ram_start = min(ram_start,bank_start);
+        ram_end = max(ram_end,bank_end);
 
         setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT);
 
diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
index 1c3abcf..59b2887 100644
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -126,7 +126,12 @@
 #define CONFIG_SEPARATE_XENHEAP 1
 
 #define FRAMETABLE_VIRT_START  _AT(vaddr_t,0x02000000)
-#define VMAP_VIRT_START  _AT(vaddr_t,0x10000000)
+#define FRAMETABLE_SIZE        MB(128-32)
+#define FRAMETABLE_NR          (FRAMETABLE_SIZE / sizeof(*frame_table))
+#define FRAMETABLE_VIRT_END    (FRAMETABLE_VIRT_START + FRAMETABLE_SIZE - 1)
+
+#define VMAP_VIRT_START        _AT(vaddr_t,0x10000000)
+
 #define XENHEAP_VIRT_START     _AT(vaddr_t,0x40000000)
 #define XENHEAP_VIRT_END       _AT(vaddr_t,0x7fffffff)
 #define DOMHEAP_VIRT_START     _AT(vaddr_t,0x80000000)
@@ -149,7 +154,9 @@
 #define VMAP_VIRT_END    (VMAP_VIRT_START + GB(1) - 1)
 
 #define FRAMETABLE_VIRT_START  GB(32)
-#define FRAMETABLE_VIRT_END    (FRAMETABLE_VIRT_START + GB(32) - 1)
+#define FRAMETABLE_SIZE        GB(32)
+#define FRAMETABLE_NR          (FRAMETABLE_SIZE / sizeof(*frame_table))
+#define FRAMETABLE_VIRT_END    (FRAMETABLE_VIRT_START + FRAMETABLE_SIZE - 1)
 
 #define DIRECTMAP_VIRT_START   SLOT0(256)
 #define DIRECTMAP_SIZE         (SLOT0_ENTRY_SIZE * (265-256))
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 9fa80a4..120500f 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -6,6 +6,7 @@
 #include <asm/page.h>
 #include <public/xen.h>
 #include <xen/domain_page.h>
+#include <xen/pdx.h>
 
 /* Align Xen to a 2 MiB boundary. */
 #define XEN_PADDR_ALIGN (1 << 21)
@@ -140,12 +141,14 @@ extern void share_xen_page_with_privileged_guests(
     struct page_info *page, int readonly);
 
 #define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
-/* MFN of the first page in the frame table. */
-extern unsigned long frametable_base_mfn;
+/* PDX of the first page in the frame table. */
+extern unsigned long frametable_base_pdx;
 
 extern unsigned long max_page;
 extern unsigned long total_pages;
 
+#define PDX_GROUP_SHIFT SECOND_SHIFT
+
 /* Boot-time pagetable setup */
 extern void setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr);
 /* Remove early mappings */
@@ -184,20 +187,15 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
     return ioremap_attr(start, len, PAGE_HYPERVISOR_WC);
 }
 
+/* XXX -- account for base */
 #define mfn_valid(mfn)        ({                                              \
     unsigned long __m_f_n = (mfn);                                            \
-    likely(__m_f_n >= frametable_base_mfn && __m_f_n < max_page);             \
+    likely(pfn_to_pdx(__m_f_n) >= frametable_base_pdx && __mfn_valid(__m_f_n)); \
 })
 
-#define max_pdx                 max_page
-#define pfn_to_pdx(pfn)         (pfn)
-#define pdx_to_pfn(pdx)         (pdx)
-#define virt_to_pdx(va)         virt_to_mfn(va)
-#define pdx_to_virt(pdx)        mfn_to_virt(pdx)
-
 /* Convert between machine frame numbers and page-info structures. */
-#define mfn_to_page(mfn)  (frame_table + (pfn_to_pdx(mfn) - frametable_base_mfn))
-#define page_to_mfn(pg)   pdx_to_pfn((unsigned long)((pg) - frame_table) + frametable_base_mfn)
+#define mfn_to_page(mfn)  (frame_table + (pfn_to_pdx(mfn) - frametable_base_pdx))
+#define page_to_mfn(pg)   pdx_to_pfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)
 #define __page_to_mfn(pg)  page_to_mfn(pg)
 #define __mfn_to_page(mfn) mfn_to_page(mfn)
 
@@ -230,9 +228,11 @@ static inline void *maddr_to_virt(paddr_t ma)
 #else
 static inline void *maddr_to_virt(paddr_t ma)
 {
-    ASSERT((ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT));
-    ma -= pfn_to_paddr(xenheap_mfn_start);
-    return (void *)(unsigned long) ma + DIRECTMAP_VIRT_START;
+    ASSERT(pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT));
+    return (void *)(DIRECTMAP_VIRT_START -
+                    pfn_to_paddr(xenheap_mfn_start) +
+                    ((ma & ma_va_bottom_mask) |
+                     ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
 }
 #endif
 
@@ -258,13 +258,14 @@ static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa, unsigned int flags)
 static inline struct page_info *virt_to_page(const void *v)
 {
     unsigned long va = (unsigned long)v;
+    unsigned long pdx;
+
     ASSERT(va >= XENHEAP_VIRT_START);
     ASSERT(va < xenheap_virt_end);
 
-    return frame_table
-        + ((va - XENHEAP_VIRT_START) >> PAGE_SHIFT)
-        + xenheap_mfn_start
-        - frametable_base_mfn;
+    pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT;
+    pdx += pfn_to_pdx(xenheap_mfn_start);
+    return frame_table + pdx - frametable_base_pdx;
 }
 
 static inline void *page_to_virt(const struct page_info *pg)
diff --git a/xen/include/asm-arm/numa.h b/xen/include/asm-arm/numa.h
index 2c019d7..06a9d5a 100644
--- a/xen/include/asm-arm/numa.h
+++ b/xen/include/asm-arm/numa.h
@@ -12,7 +12,7 @@ static inline __attribute__((pure)) int phys_to_nid(paddr_t addr)
 
 /* XXX: implement NUMA support */
 #define node_spanned_pages(nid) (total_pages)
-#define node_start_pfn(nid) (frametable_base_mfn)
+#define node_start_pfn(nid) (pdx_to_pfn(frametable_base_pdx))
 #define __node_distance(a, b) (20)
 
 #endif /* __ARCH_ARM_NUMA_H */
-- 
1.7.10.4

* Re: [PATCH for-4.5 v2 1/2] xen: refactor physical address space compression support into common code
  2014-09-12 15:15 ` [PATCH for-4.5 v2 1/2] xen: refactor physical address space compression support into common code Ian Campbell
@ 2014-09-12 15:32   ` Jan Beulich
  2014-09-12 15:35     ` Ian Campbell
  0 siblings, 1 reply; 8+ messages in thread
From: Jan Beulich @ 2014-09-12 15:32 UTC (permalink / raw)
  To: Ian Campbell
  Cc: stefano.stabellini, julien.grall, tim, xen-devel, Roy Franz, Fu Wei

>>> On 12.09.14 at 17:15, <ian.campbell@citrix.com> wrote:
> The "pdx compression" functionality will be useful on ARM as well.
> 
> Move the code to common code+header and introduce HAS_PDX to control when it 
> is
> built. L2_PAGETABLE_SHIFT is x86 specific, so introduce PDX_GROUP_SHIFT to
> abstract it out.
> 
> ARM has no need for superpage compression (yet?) and lacks SUPERPAGE_SHIFT 
> so
> those functions (spage_to_mfn et al) are not moved.
> 
> No effect on x86 and no change for ARM (yet).
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> Acked-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2:
>  - Correct closing guard comment in pdx.h

Hmm, looking at it ...

> --- /dev/null
> +++ b/xen/include/xen/pdx.h
> @@ -0,0 +1,47 @@
> +#ifndef __XEN_PDX_H__
> +#define __XEN_PDX_H__
> +
> +#ifdef HAS_PDX

... is it HAS_PDX ...

> +#endif /* HAVE_PDX */

... or HAVE_PDX?

Jan

> +#endif /* __XEN_PDX_H__ */
> +
> +/*
> + * Local variables:
> + * mode: C
> + * c-file-style: "BSD"
> + * c-basic-offset: 4
> + * indent-tabs-mode: nil
> + * End:
> + */

* Re: [PATCH for-4.5 v2 1/2] xen: refactor physical address space compression support into common code
  2014-09-12 15:32   ` Jan Beulich
@ 2014-09-12 15:35     ` Ian Campbell
  0 siblings, 0 replies; 8+ messages in thread
From: Ian Campbell @ 2014-09-12 15:35 UTC (permalink / raw)
  To: Jan Beulich
  Cc: stefano.stabellini, julien.grall, tim, xen-devel, Roy Franz, Fu Wei

On Fri, 2014-09-12 at 16:32 +0100, Jan Beulich wrote:
> >>> On 12.09.14 at 17:15, <ian.campbell@citrix.com> wrote:
> > The "pdx compression" functionality will be useful on ARM as well.
> > 
> > Move the code to common code+header and introduce HAS_PDX to control when it 
> > is
> > built. L2_PAGETABLE_SHIFT is x86 specific, so introduce PDX_GROUP_SHIFT to
> > abstract it out.
> > 
> > ARM has no need for superpage compression (yet?) and lacks SUPERPAGE_SHIFT 
> > so
> > those functions (spage_to_mfn et al) are not moved.
> > 
> > No effect on x86 and no change for ARM (yet).
> > 
> > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > Acked-by: Jan Beulich <jbeulich@suse.com>
> > ---
> > v2:
> >  - Correct closing guard comment in pdx.h
> 
> Hmm, looking at it ...
> 
> > --- /dev/null
> > +++ b/xen/include/xen/pdx.h
> > @@ -0,0 +1,47 @@
> > +#ifndef __XEN_PDX_H__
> > +#define __XEN_PDX_H__
> > +
> > +#ifdef HAS_PDX
> 
> ... is it HAS_PDX ...
> 
> > +#endif /* HAVE_PDX */
> 
> ... or HAVE_PDX?

HAS_PDX :-(

I've fixed this locally.

Ian.

* Re: [PATCH for-4.5 v2 2/2] xen: arm: Enable physical address space compression (PDX) on arm32
  2014-09-12 15:15 ` [PATCH for-4.5 v2 2/2] xen: arm: Enable physical address space compression (PDX) on arm32 Ian Campbell
@ 2014-09-12 23:00   ` Julien Grall
  2014-09-16 19:33     ` Ian Campbell
  0 siblings, 1 reply; 8+ messages in thread
From: Julien Grall @ 2014-09-12 23:00 UTC (permalink / raw)
  To: Ian Campbell, xen-devel; +Cc: Roy Franz, tim, Fu Wei, stefano.stabellini

Hi Ian,

Don't you implement PDX for both arm32 and arm64? If so, you may need to 
update the commit title.

The patch looks good to me. I have only a few comments on it.

On 12/09/14 08:15, Ian Campbell wrote:
> This allows us to support sparse physical address maps which we previously
> could not because the frametable would end up taking up an enourmous fraction

enormous

[..]

> -    memset(&frame_table[0], 0, nr_pages * sizeof(struct page_info));
> -    memset(&frame_table[nr_pages], -1,
> -           frametable_size - (nr_pages * sizeof(struct page_info)));
> +    memset(&frame_table[0], 0, nr_pdxs * sizeof(struct page_info));
> +    memset(&frame_table[nr_pdxs], -1,
> +           frametable_size - (nr_pdxs * sizeof(struct page_info)));
> +
> +    frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pdxs * sizeof(struct page_info));
>

NIT: I don't think it's necessary to have a blank line before the end of 
the block. I would drop it.

[..]

> +/* Sets all bits from the most-significant 1-bit down to the LSB */
> +static u64 __init fill_mask(u64 mask)
> +{
> +    while (mask & (mask + 1))
> +        mask |= mask + 1;
> +    return mask;
> +}

The function is the same on x86. Shall we create a helper?
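
(For reference, a quick trace of what the helper computes, with an
illustrative input:

    fill_mask(0x900): 0x900 -> 0x901 -> 0x903 -> 0x907 -> ... -> 0xFFF

i.e. every bit from the most significant set bit down to bit 0 ends up
set, which is the form the hole detection in init_pdx() expects.)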

[..]

>           setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT);

You forgot to drop the check if ( bank != bootinfo.mem.nr_banks ) which, I 
think, is not relevant anymore.


> diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
> index 1c3abcf..59b2887 100644
> --- a/xen/include/asm-arm/config.h
> +++ b/xen/include/asm-arm/config.h
> @@ -126,7 +126,12 @@
>   #define CONFIG_SEPARATE_XENHEAP 1
>
>   #define FRAMETABLE_VIRT_START  _AT(vaddr_t,0x02000000)
> -#define VMAP_VIRT_START  _AT(vaddr_t,0x10000000)
> +#define FRAMETABLE_SIZE        MB(128-32)

I would add a comment about why the frametable size is "MB(128-32)".

Regards,

-- 
Julien Grall

* Re: [PATCH for-4.5 v2 2/2] xen: arm: Enable physical address space compression (PDX) on arm32
  2014-09-12 23:00   ` Julien Grall
@ 2014-09-16 19:33     ` Ian Campbell
  2014-09-16 20:49       ` Julien Grall
  0 siblings, 1 reply; 8+ messages in thread
From: Ian Campbell @ 2014-09-16 19:33 UTC (permalink / raw)
  To: Julien Grall; +Cc: Roy Franz, stefano.stabellini, tim, Fu Wei, xen-devel

On Fri, 2014-09-12 at 16:00 -0700, Julien Grall wrote:
> > +/* Sets all bits from the most-significant 1-bit down to the LSB */
> > +static u64 __init fill_mask(u64 mask)
> > +{
> > +    while (mask & (mask + 1))
> > +        mask |= mask + 1;
> > +    return mask;
> > +}
> 
> The function is the same on x86. Shall we create an helper?

I'll see what I can do.

> > diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
> > index 1c3abcf..59b2887 100644
> > --- a/xen/include/asm-arm/config.h
> > +++ b/xen/include/asm-arm/config.h
> > @@ -126,7 +126,12 @@
> >   #define CONFIG_SEPARATE_XENHEAP 1
> >
> >   #define FRAMETABLE_VIRT_START  _AT(vaddr_t,0x02000000)
> > -#define VMAP_VIRT_START  _AT(vaddr_t,0x10000000)
> > +#define FRAMETABLE_SIZE        MB(128-32)
> 
> I would add a comment about why the frametable size is "MB(128-32)".

There is a big comment just above here which gives an overview of the
virtual address space layout. I don't think it needs repeating here.

Ian.

* Re: [PATCH for-4.5 v2 2/2] xen: arm: Enable physical address space compression (PDX) on arm32
  2014-09-16 19:33     ` Ian Campbell
@ 2014-09-16 20:49       ` Julien Grall
  0 siblings, 0 replies; 8+ messages in thread
From: Julien Grall @ 2014-09-16 20:49 UTC (permalink / raw)
  To: Ian Campbell; +Cc: Roy Franz, stefano.stabellini, tim, Fu Wei, xen-devel

Hi Ian,

On 16/09/14 12:33, Ian Campbell wrote:
> On Fri, 2014-09-12 at 16:00 -0700, Julien Grall wrote:
>>> diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
>>> index 1c3abcf..59b2887 100644
>>> --- a/xen/include/asm-arm/config.h
>>> +++ b/xen/include/asm-arm/config.h
>>> @@ -126,7 +126,12 @@
>>>    #define CONFIG_SEPARATE_XENHEAP 1
>>>	
>>>    #define FRAMETABLE_VIRT_START  _AT(vaddr_t,0x02000000)
>>> -#define VMAP_VIRT_START  _AT(vaddr_t,0x10000000)
>>> +#define FRAMETABLE_SIZE        MB(128-32)
>>
>> I would add a comment about why the frametable size is "MB(128-32)".
>
> There is a big comment just above here which gives an overview of the
> virtual address space layout. I don't think it needs repeating here.

Hmm right. I hadn't looked at include/asm-arm/config.h, sorry.

Regards,

-- 
Julien Grall
