* [BAND-AID PATCH] x86: partially undo (disable) d639e6a05a
From: Jan Beulich @ 2015-03-27 12:19 UTC
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser, Tim Deegan

Osstest results suggest that commit d639e6a05a ("x86: allow 64-bit PV
guest kernels to suppress user mode exposure of M2P") is causing
migration failures. Comment out the meat of it, without fully
reverting, until it is understood what exactly is going wrong.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
I intend to push this unless the bisector manages to disprove the
suspicion about the aforementioned commit by tonight.

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1396,25 +1396,31 @@ void init_guest_l4_table(l4_pgentry_t l4
         l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
+#if 0 /* FIXME */
     if ( zap_ro_mpt || is_pv_32on64_domain(d) || paging_mode_refcounts(d) )
         l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
+#endif
 }
 
 void fill_ro_mpt(unsigned long mfn)
 {
+#if 0 /* FIXME */
     l4_pgentry_t *l4tab = map_domain_page(mfn);
 
     l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
         idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
     unmap_domain_page(l4tab);
+#endif
 }
 
 void zap_ro_mpt(unsigned long mfn)
 {
+#if 0 /* FIXME */
     l4_pgentry_t *l4tab = map_domain_page(mfn);
 
     l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
     unmap_domain_page(l4tab);
+#endif
 }
 
 static int alloc_l4_table(struct page_info *page)
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1435,8 +1435,10 @@ void sh_install_xen_entries_in_l4(struct
         shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
                             __PAGE_HYPERVISOR);
 
+#if 0 /* FIXME */
     if ( !VM_ASSIST(d, m2p_strict) )
         sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
+#endif
 
     /* Shadow linear mapping for 4-level shadows.  N.B. for 3-level
      * shadows on 64-bit xen, this linear mapping is later replaced by the
@@ -3978,6 +3980,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
         /* PAGING_LEVELS==4 implies 64-bit, which means that
          * map_domain_page_global can't fail */
         BUG_ON(v->arch.paging.shadow.guest_vtable == NULL);
+#if 0 /* FIXME */
         if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) )
         {
             shadow_l4e_t *sl4e = v->arch.paging.shadow.guest_vtable;
@@ -3991,6 +3994,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
                 sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
                     shadow_l4e_empty();
         }
+#endif
     }
     else
         v->arch.paging.shadow.guest_vtable = __linear_l4_table;
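
For reference, the paths being disabled condense to the following
(an illustrative summary, not actual Xen code; the guards are taken
verbatim from the hunks above and from the call sites visible in the
full revert later in this thread):

    /* L4 creation (init_guest_l4_table): clear the read-only M2P slot
     * for 32-on-64 and refcounted domains, or when the caller asks. */
    if ( zap_ro_mpt || is_pv_32on64_domain(d) || paging_mode_refcounts(d) )
        l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();

    /* Kernel page table load (new_guest_cr3): a domain which did not
     * request m2p_strict gets the mapping copied in from the idle table. */
    if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
        fill_ro_mpt(mfn);

    /* User page table load (do_mmuext_op): a domain which did request
     * m2p_strict gets the mapping zapped, hiding the M2P from guest
     * user mode. */
    if ( VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
        zap_ro_mpt(mfn);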





* Re: [BAND-AID PATCH] x86: partially undo (disable) d639e6a05a
From: Tim Deegan @ 2015-03-27 12:28 UTC
  To: Jan Beulich; +Cc: xen-devel, Keir Fraser, Andrew Cooper

At 12:19 +0000 on 27 Mar (1427458745), Jan Beulich wrote:
> Osstest results suggest that commit d639e6a05a ("x86: allow 64-bit PV
> guest kernels to suppress user mode exposure of M2P") is causing
> migration failures. Comment out the meat of it, without fully
> reverting, until it is understood what exactly is going wrong.

I'd prefer to just revert it unless there's a benefit to keeping some
parts still enabled.

Cheers,

Tim.

* Re: [BAND-AID PATCH] x86: partially undo (disable) d639e6a05a
From: Jan Beulich @ 2015-03-27 12:35 UTC
  To: Tim Deegan; +Cc: Andrew Cooper, Keir Fraser, xen-devel

>>> On 27.03.15 at 13:28, <tim@xen.org> wrote:
> At 12:19 +0000 on 27 Mar (1427458745), Jan Beulich wrote:
>> Osstest results suggest that commit d639e6a05a ("x86: allow 64-bit PV
>> guest kernels to suppress user mode exposure of M2P") is causing
>> migration failures. Comment out the meat of it, without fully
>> reverting, until it is understood what exactly is going wrong.
> 
> I'd prefer to just revert it unless there's a benefit to keeping some
> parts still enabled.

I'd like to avoid reverting 2e4e0d4efc as a prerequisite, and I
predicted (maybe wrongly) that putting together a revert which doesn't
also revert that other commit would take more time than creating this
one; I'm also pretty short on time today. So yes, if you or Andrew
want to put together a full revert of just that one commit, you may
consider it pre-acked.

Jan

* Re: [BAND-AID PATCH] x86: partially undo (disable) d639e6a05a
From: Tim Deegan @ 2015-03-27 12:47 UTC
  To: Jan Beulich; +Cc: Andrew Cooper, Keir Fraser, xen-devel

At 12:35 +0000 on 27 Mar (1427459701), Jan Beulich wrote:
> >>> On 27.03.15 at 13:28, <tim@xen.org> wrote:
> > At 12:19 +0000 on 27 Mar (1427458745), Jan Beulich wrote:
> >> Osstest results suggest that commit d639e6a05a ("x86: allow 64-bit PV
> >> guest kernels to suppress user mode exposure of M2P") is causing
> >> migration failures. Comment out the meat of it, without fully
> >> reverting, until it is understood what exactly is going wrong.
> > 
> > I'd prefer to just revert it unless there's a benefit to keeping some
> > parts still enabled.
> 
> I'd like to avoid reverting 2e4e0d4efc as a prerequisite, and I
> predicted (maybe wrongly) that putting together a revert which doesn't
> also revert that other commit would take more time than creating this
> one; I'm also pretty short on time today. So yes, if you or Andrew
> want to put together a full revert of just that one commit, you may
> consider it pre-acked.

OK, here you go.  I'll leave it to you to decide when/if to apply, as
I won't be around later.

From 7ae18ee3c980f750dbcb91be3fe452e99c39d1a7 Mon Sep 17 00:00:00 2001
From: Tim Deegan <tim@xen.org>
Date: Fri, 27 Mar 2015 12:43:12 +0000
Subject: [PATCH] Revert "x86: allow 64-bit PV guest kernels to suppress user
 mode exposure of M2P"

This reverts commit d639e6a05a0f8ee0e61c6cc4eebba78934ef3648.

Signed-off-by: Tim Deegan <tim@xen.org>
Acked-by: Jan Beulich <JBeulich@suse.com>

Conflicts:
	xen/arch/x86/domain.c
	xen/arch/x86/mm.c
	xen/arch/x86/mm/shadow/multi.c
---
 xen/arch/x86/domain.c          |  9 +--------
 xen/arch/x86/domain_build.c    |  2 +-
 xen/arch/x86/mm.c              | 28 ++--------------------------
 xen/arch/x86/mm/shadow/multi.c | 16 ----------------
 xen/arch/x86/x86_64/mm.c       |  6 +++---
 xen/include/asm-x86/config.h   |  3 +--
 xen/include/asm-x86/mm.h       |  5 +----
 xen/include/public/xen.h       | 12 ------------
 8 files changed, 9 insertions(+), 72 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 7bae90e..393aa26 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -339,7 +339,7 @@ static int setup_compat_l4(struct vcpu *v)
 
     l4tab = __map_domain_page(pg);
     clear_page(l4tab);
-    init_guest_l4_table(l4tab, v->domain, 1);
+    init_guest_l4_table(l4tab, v->domain);
     unmap_domain_page(l4tab);
 
     v->arch.guest_table = pagetable_from_page(pg);
@@ -970,11 +970,7 @@ int arch_set_info_guest(
         case -EINTR:
             rc = -ERESTART;
         case -ERESTART:
-            break;
         case 0:
-            if ( !compat && !VM_ASSIST(d, m2p_strict) &&
-                 !paging_mode_refcounts(d) )
-                fill_ro_mpt(cr3_gfn);
             break;
         default:
             if ( cr3_page == current->arch.old_guest_table )
@@ -1009,10 +1005,7 @@ int arch_set_info_guest(
                 default:
                     if ( cr3_page == current->arch.old_guest_table )
                         cr3_page = NULL;
-                    break;
                 case 0:
-                    if ( VM_ASSIST(d, m2p_strict) )
-                        zap_ro_mpt(cr3_gfn);
                     break;
                 }
             }
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 287b932..e5c845c 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -1203,7 +1203,7 @@ int __init construct_dom0(
         l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
     }
     clear_page(l4tab);
-    init_guest_l4_table(l4tab, d, 0);
+    init_guest_l4_table(l4tab, d);
     v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
     if ( is_pv_32on64_domain(d) )
         v->arch.guest_table_user = v->arch.guest_table;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c92ac90..3e38761 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1380,8 +1380,7 @@ static int alloc_l3_table(struct page_info *page)
     return rc > 0 ? 0 : rc;
 }
 
-void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
-                         bool_t zap_ro_mpt)
+void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d)
 {
     /* Xen private mappings. */
     memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
@@ -1396,25 +1395,6 @@ void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
         l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
     l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
         l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
-    if ( zap_ro_mpt || is_pv_32on64_domain(d) || paging_mode_refcounts(d) )
-        l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
-}
-
-void fill_ro_mpt(unsigned long mfn)
-{
-    l4_pgentry_t *l4tab = map_domain_page(mfn);
-
-    l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
-        idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
-    unmap_domain_page(l4tab);
-}
-
-void zap_ro_mpt(unsigned long mfn)
-{
-    l4_pgentry_t *l4tab = map_domain_page(mfn);
-
-    l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
-    unmap_domain_page(l4tab);
 }
 
 static int alloc_l4_table(struct page_info *page)
@@ -1464,7 +1444,7 @@ static int alloc_l4_table(struct page_info *page)
         adjust_guest_l4e(pl4e[i], d);
     }
 
-    init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
+    init_guest_l4_table(pl4e, d);
     unmap_domain_page(pl4e);
 
     return rc > 0 ? 0 : rc;
@@ -2774,8 +2754,6 @@ int new_guest_cr3(unsigned long mfn)
 
     invalidate_shadow_ldt(curr, 0);
 
-    if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
-        fill_ro_mpt(mfn);
     curr->arch.guest_table = pagetable_from_pfn(mfn);
     update_cr3(curr);
 
@@ -3133,8 +3111,6 @@ long do_mmuext_op(
                                 op.arg1.mfn);
                     break;
                 }
-                if ( VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
-                    zap_ro_mpt(op.arg1.mfn);
             }
 
             curr->arch.guest_table_user = pagetable_from_pfn(op.arg1.mfn);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index bf8a469..c82aa96 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1435,9 +1435,6 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
         shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
                             __PAGE_HYPERVISOR);
 
-    if ( !VM_ASSIST(d, m2p_strict) )
-        sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
-
     /* Shadow linear mapping for 4-level shadows.  N.B. for 3-level
      * shadows on 64-bit xen, this linear mapping is later replaced by the
      * monitor pagetable structure, which is built in make_monitor_table
@@ -3978,19 +3975,6 @@ sh_update_cr3(struct vcpu *v, int do_locking)
         /* PAGING_LEVELS==4 implies 64-bit, which means that
          * map_domain_page_global can't fail */
         BUG_ON(v->arch.paging.shadow.guest_vtable == NULL);
-        if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) )
-        {
-            shadow_l4e_t *sl4e = v->arch.paging.shadow.guest_vtable;
-
-            if ( (v->arch.flags & TF_kernel_mode) &&
-                 !VM_ASSIST(d, m2p_strict) )
-                sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
-                    idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
-            else if ( !(v->arch.flags & TF_kernel_mode) &&
-                      VM_ASSIST(d, m2p_strict) )
-                sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
-                    shadow_l4e_empty();
-        }
     }
     else
         v->arch.paging.shadow.guest_vtable = __linear_l4_table;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 5c70061..6875c92 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -480,7 +480,7 @@ static int setup_m2p_table(struct mem_hotadd_info *info)
                 l2_ro_mpt += l2_table_offset(va);
             }
 
-            /* NB. Cannot be GLOBAL: guest user mode should not see it. */
+            /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
             l2e_write(l2_ro_mpt, l2e_from_pfn(mfn,
                    /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT));
         }
@@ -583,7 +583,7 @@ void __init paging_init(void)
                        0x77, 1UL << L3_PAGETABLE_SHIFT);
 
                 ASSERT(!l2_table_offset(va));
-                /* NB. Cannot be GLOBAL: guest user mode should not see it. */
+                /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
                 l3e_write(&l3_ro_mpt[l3_table_offset(va)],
                     l3e_from_page(l1_pg,
                         /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT));
@@ -621,7 +621,7 @@ void __init paging_init(void)
                       l3e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER));
             ASSERT(!l2_table_offset(va));
         }
-        /* NB. Cannot be GLOBAL: guest user mode should not see it. */
+        /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
         if ( l1_pg )
             l2e_write(l2_ro_mpt, l2e_from_page(
                 l1_pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT));
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index 7700c77..3569753 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -346,8 +346,7 @@ extern unsigned long xen_phys_start;
 #define NATIVE_VM_ASSIST_VALID   ((1UL << VMASST_TYPE_4gb_segments)        | \
                                   (1UL << VMASST_TYPE_4gb_segments_notify) | \
                                   (1UL << VMASST_TYPE_writable_pagetables) | \
-                                  (1UL << VMASST_TYPE_pae_extended_cr3)    | \
-                                  (1UL << VMASST_TYPE_m2p_strict))
+                                  (1UL << VMASST_TYPE_pae_extended_cr3))
 #define VM_ASSIST_VALID          NATIVE_VM_ASSIST_VALID
 #define COMPAT_VM_ASSIST_VALID   (NATIVE_VM_ASSIST_VALID & \
                                   ((1UL << COMPAT_BITS_PER_LONG) - 1))
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index d1f95c8..136f1c3 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -318,10 +318,7 @@ static inline void *__page_to_virt(const struct page_info *pg)
 int free_page_type(struct page_info *page, unsigned long type,
                    int preemptible);
 
-void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
-                         bool_t zap_ro_mpt);
-void fill_ro_mpt(unsigned long mfn);
-void zap_ro_mpt(unsigned long mfn);
+void init_guest_l4_table(l4_pgentry_t[], const struct domain *);
 
 int is_iomem_page(unsigned long mfn);
 
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index 17ecb94..dd52a50 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -486,18 +486,6 @@ DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
 /* x86/PAE guests: support PDPTs above 4GB. */
 #define VMASST_TYPE_pae_extended_cr3     3
 
-/*
- * x86/64 guests: strictly hide M2P from user mode.
- * This allows the guest to control respective hypervisor behavior:
- * - when not set, L4 tables get created with the respective slot blank,
- *   and whenever the L4 table gets used as a kernel one the missing
- *   mapping gets inserted,
- * - when set, L4 tables get created with the respective slot initialized
- *   as before, and whenever the L4 table gets used as a user one the
- *   mapping gets zapped.
- */
-#define VMASST_TYPE_m2p_strict           32
-
 #if __XEN_INTERFACE_VERSION__ < 0x00040600
 #define MAX_VMASST_TYPE                  3
 #endif
-- 
2.1.0
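
For completeness, a hypothetical sketch of the guest-side opt-in this
removes: HYPERVISOR_vm_assist() is the Linux wrapper for Xen's
vm_assist hypercall, and the assist's value (32) comes from the
public-header hunk above. Whether any given guest kernel carries such
a call, and the helper name xen_request_m2p_strict, are assumptions,
so treat the snippet as illustrative only:

    #include <linux/init.h>
    #include <linux/printk.h>
    #include <xen/interface/xen.h>    /* VMASST_CMD_enable */
    #include <asm/xen/hypercall.h>    /* HYPERVISOR_vm_assist() */

    #ifndef VMASST_TYPE_m2p_strict
    #define VMASST_TYPE_m2p_strict 32 /* per the xen.h hunk above */
    #endif

    /* Ask Xen to hide the M2P from guest user mode. */
    static void __init xen_request_m2p_strict(void)
    {
        /* A non-zero return means the hypervisor lacks this assist. */
        if (HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_m2p_strict))
            pr_warn("xen: m2p_strict vm_assist unavailable\n");
    }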

Thread overview: 4 messages
2015-03-27 12:19 [BAND-AID PATCH] x86: partially undo (disable) d639e6a05a Jan Beulich
2015-03-27 12:28 ` Tim Deegan
2015-03-27 12:35   ` Jan Beulich
2015-03-27 12:47     ` Tim Deegan
