* [PATCH v2] x86/mm: Add mem access rights to NPT
@ 2018-06-18 15:17 Alexandru Isaila
  2018-06-18 15:39 ` Tamas K Lengyel
  2018-06-22 15:51 ` Jan Beulich
  0 siblings, 2 replies; 18+ messages in thread
From: Alexandru Isaila @ 2018-06-18 15:17 UTC (permalink / raw)
  To: xen-devel
  Cc: tamas, rcojocaru, george.dunlap, andrew.cooper3, jbeulich,
	Isaila Alexandru

From: Isaila Alexandru <aisaila@bitdefender.com>

This patch adds access rights for the NPT pages. The access rights are
saved in a radix tree, with the root saved in p2m_domain. The rights are
manipulated through the p2m_set_access() and p2m_get_access() functions.
The patch follows the EPT logic.

Note: It was tested with xen-access write

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
Changes since V1:
        - Save the page access rights in a radix tree
---
 xen/arch/x86/mm/mem_access.c     |   3 ++
 xen/arch/x86/mm/p2m-pt.c         | 100 ++++++++++++++++++++++++++++++++++-----
 xen/arch/x86/mm/p2m.c            |   3 ++
 xen/include/asm-x86/mem_access.h |   2 +-
 xen/include/asm-x86/p2m.h        |   6 +++
 5 files changed, 100 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index c0cd017..b240f13 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -221,6 +221,9 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
         {
             req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
             req->u.mem_access.gla = gla;
+        }
+        if ( npfec.gla_valid || cpu_has_svm )
+        {
 
             if ( npfec.kind == npfec_kind_with_gla )
                 req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index b8c5d2e..1a16533 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -68,7 +68,8 @@
 static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
                                        p2m_type_t t,
                                        mfn_t mfn,
-                                       unsigned int level)
+                                       unsigned int level,
+                                       p2m_access_t access)
 {
     unsigned long flags;
     /*
@@ -87,23 +88,27 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
     case p2m_ram_paged:
     case p2m_ram_paging_in:
     default:
-        return flags | _PAGE_NX_BIT;
+        flags |= P2M_BASE_FLAGS | _PAGE_NX_BIT;
+        break;
     case p2m_grant_map_ro:
         return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
     case p2m_ioreq_server:
         flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
         if ( p2m->ioreq.flags & XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
-            return flags & ~_PAGE_RW;
-        return flags;
+            flags &= ~_PAGE_RW;
+        break;
     case p2m_ram_ro:
     case p2m_ram_logdirty:
     case p2m_ram_shared:
-        return flags | P2M_BASE_FLAGS;
+        flags |= P2M_BASE_FLAGS;
+        break;
     case p2m_ram_rw:
-        return flags | P2M_BASE_FLAGS | _PAGE_RW;
+        flags |= P2M_BASE_FLAGS | _PAGE_RW;
+        break;
     case p2m_grant_map_rw:
     case p2m_map_foreign:
-        return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
+        flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
+        break;
     case p2m_mmio_direct:
         if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
             flags |= _PAGE_RW;
@@ -112,8 +117,37 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
             flags |= _PAGE_PWT;
             ASSERT(!level);
         }
-        return flags | P2M_BASE_FLAGS | _PAGE_PCD;
+        flags |= P2M_BASE_FLAGS | _PAGE_PCD;
+        break;
+    }
+    switch (access)
+    {
+        case p2m_access_r:
+        case p2m_access_w:
+            flags |= _PAGE_NX_BIT;
+            flags &= ~_PAGE_RW;
+            break;
+        case p2m_access_rw:
+            flags |= _PAGE_NX_BIT;
+            break;
+        case p2m_access_n:
+        case p2m_access_n2rwx:
+            flags |= _PAGE_NX_BIT;
+            flags &= ~_PAGE_RW;
+            break;
+        case p2m_access_rx:
+        case p2m_access_wx:
+        case p2m_access_rx2rw:
+            flags &= ~(_PAGE_NX_BIT | _PAGE_RW);
+            break;
+        case p2m_access_x:
+            flags &= ~_PAGE_RW;
+            break;
+        case p2m_access_rwx:
+        default:
+            break;
     }
+    return flags;
 }
 
 
@@ -174,6 +208,35 @@ static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
         l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags));
 }
 
+static p2m_access_t p2m_get_access(struct p2m_domain *p2m, unsigned long gfn)
+{
+    void *ptr;
+
+    ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn);
+    if ( !ptr )
+        return p2m_access_rwx;
+    else
+        return radix_tree_ptr_to_int(ptr);
+}
+
+static void p2m_set_access(struct p2m_domain *p2m, unsigned long gfn,
+                                      p2m_access_t a)
+{
+    int rc;
+
+    if ( p2m_access_rwx == a )
+        radix_tree_delete(&p2m->mem_access_settings, gfn);
+
+    rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
+                           radix_tree_int_to_ptr(a));
+    if ( rc == -EEXIST )
+        /* If a setting already exists, change it to the new one */
+        radix_tree_replace_slot(
+            radix_tree_lookup_slot(
+                &p2m->mem_access_settings, gfn),
+            radix_tree_int_to_ptr(a));
+}
+
 /* Returns: 0 for success, -errno for failure */
 static int
 p2m_next_level(struct p2m_domain *p2m, void **table,
@@ -201,6 +264,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
 
         p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
+        p2m_set_access(p2m, gfn, p2m->default_access);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
     }
     else if ( flags & _PAGE_PSE )
@@ -249,6 +313,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         {
             new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
                                      flags);
+            p2m_set_access(p2m, gfn, p2m->default_access);
             p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
         }
 
@@ -256,6 +321,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
 
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
         p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
+        p2m_set_access(p2m, gfn, p2m->default_access);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
     }
     else
@@ -420,8 +486,9 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
         if ( nt != ot )
         {
             unsigned long mfn = l1e_get_pfn(e);
+            p2m_access_t a = p2m_get_access(p2m, gfn);
             unsigned long flags = p2m_type_to_flags(p2m, nt,
-                                                    _mfn(mfn), level);
+                                                    _mfn(mfn), level, a);
 
             if ( level )
             {
@@ -569,13 +636,14 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
         l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
             ? p2m_l3e_from_pfn(mfn_x(mfn),
-                               p2m_type_to_flags(p2m, p2mt, mfn, 2))
+                               p2m_type_to_flags(p2m, p2mt, mfn, 2, p2ma))
             : l3e_empty();
         entry_content.l1 = l3e_content.l3;
 
         if ( entry_content.l1 != 0 )
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
 
+        p2m_set_access(p2m, gfn, p2ma);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
     }
@@ -608,7 +676,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
 
         if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
             entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
-                                         p2m_type_to_flags(p2m, p2mt, mfn, 0));
+                                         p2m_type_to_flags(p2m, p2mt, mfn, 0, p2ma));
         else
             entry_content = l1e_empty();
 
@@ -630,6 +698,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
             p2m->ioreq.entry_count--;
         }
 
+        p2m_set_access(p2m, gfn, p2ma);
         /* level 1 entry */
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -661,13 +730,14 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
         l2e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
             ? p2m_l2e_from_pfn(mfn_x(mfn),
-                               p2m_type_to_flags(p2m, p2mt, mfn, 1))
+                               p2m_type_to_flags(p2m, p2mt, mfn, 1, p2ma))
             : l2e_empty();
         entry_content.l1 = l2e_content.l2;
 
         if ( entry_content.l1 != 0 )
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
 
+        p2m_set_access(p2m, gfn, p2ma);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
     }
@@ -750,7 +820,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_,
      * XXX we will return p2m_invalid for unmapped gfns */
     *t = p2m_mmio_dm;
     /* Not implemented except with EPT */
-    *a = p2m_access_rwx; 
+    *a = p2m_access_n;
 
     if ( gfn > p2m->max_mapped_pfn )
     {
@@ -813,6 +883,7 @@ pod_retry_l3:
                        l1_table_offset(addr));
             *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                                  p2m_flags_to_type(flags), p2m, gfn);
+            *a = p2m_get_access(p2m, gfn);
             unmap_domain_page(l3e);
 
             ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
@@ -852,6 +923,7 @@ pod_retry_l2:
         mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr));
         *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                              p2m_flags_to_type(flags), p2m, gfn);
+        *a = p2m_get_access(p2m, gfn);
         unmap_domain_page(l2e);
         
         ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
@@ -888,6 +960,7 @@ pod_retry_l1:
     }
     mfn = l1e_get_mfn(*l1e);
     *t = p2m_recalc_type(recalc || _needs_recalc(flags), l1t, p2m, gfn);
+    *a = p2m_get_access(p2m, gfn);
     unmap_domain_page(l1e);
 
     ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t) || p2m_is_paging(*t));
@@ -1127,6 +1200,7 @@ void p2m_pt_init(struct p2m_domain *p2m)
     p2m->change_entry_type_global = p2m_pt_change_entry_type_global;
     p2m->change_entry_type_range = p2m_pt_change_entry_type_range;
     p2m->write_p2m_entry = paging_write_p2m_entry;
+    radix_tree_init(&p2m->mem_access_settings);
 #if P2M_AUDIT
     p2m->audit_p2m = p2m_pt_audit_p2m;
 #else
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c53cab4..e360fdc 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -675,6 +675,9 @@ void p2m_teardown(struct p2m_domain *p2m)
 
     d = p2m->domain;
 
+    if ( cpu_has_svm )
+        radix_tree_destroy(&p2m->mem_access_settings, NULL);
+
     p2m_lock(p2m);
     ASSERT(atomic_read(&d->shr_pages) == 0);
     p2m->phys_table = pagetable_null();
diff --git a/xen/include/asm-x86/mem_access.h b/xen/include/asm-x86/mem_access.h
index 4043c9f..34f2c07 100644
--- a/xen/include/asm-x86/mem_access.h
+++ b/xen/include/asm-x86/mem_access.h
@@ -46,7 +46,7 @@ bool p2m_mem_access_emulate_check(struct vcpu *v,
 /* Sanity check for mem_access hardware support */
 static inline bool p2m_mem_access_sanity_check(struct domain *d)
 {
-    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
+    return is_hvm_domain(d) && hap_enabled(d);
 }
 
 #endif /*__ASM_X86_MEM_ACCESS_H__ */
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d4b3cfc..f5868df 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -288,6 +288,12 @@ struct p2m_domain {
      * retyped get this access type.  See definition of p2m_access_t. */
     p2m_access_t default_access;
 
+    /*
+     * Radix tree to store the p2m_access_t settings as the pte's don't have
+     * enough available bits to store this information.
+     */
+    struct radix_tree_root mem_access_settings;
+
     /* If true, and an access fault comes in and there is no vm_event listener, 
      * pause domain.  Otherwise, remove access restrictions. */
     bool_t       access_required;
-- 
2.7.4



* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-18 15:17 [PATCH v2] x86/mm: Add mem access rights to NPT Alexandru Isaila
@ 2018-06-18 15:39 ` Tamas K Lengyel
  2018-06-19  8:14   ` Alexandru Stefan ISAILA
  2018-06-22 15:51 ` Jan Beulich
  1 sibling, 1 reply; 18+ messages in thread
From: Tamas K Lengyel @ 2018-06-18 15:39 UTC (permalink / raw)
  To: Alexandru Isaila
  Cc: George Dunlap, Andrew Cooper, Jan Beulich, Razvan Cojocaru, Xen-devel

On Mon, Jun 18, 2018 at 9:19 AM Alexandru Isaila
<aisaila@bitdefender.com> wrote:
>
> +static p2m_access_t p2m_get_access(struct p2m_domain *p2m, unsigned long gfn)
> +{
> +    void *ptr;
> +
> +    ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn);
> +    if ( !ptr )
> +        return p2m_access_rwx;
> +    else
> +        return radix_tree_ptr_to_int(ptr);
> +}
> +
> +static void p2m_set_access(struct p2m_domain *p2m, unsigned long gfn,
> +                                      p2m_access_t a)
> +{
> +    int rc;
> +

Shouldn't there be some locking around the radix tree operations here?
If not, why not?

> +    if ( p2m_access_rwx == a )
> +        radix_tree_delete(&p2m->mem_access_settings, gfn);
> +
> +    rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
> +                           radix_tree_int_to_ptr(a));
> +    if ( rc == -EEXIST )
> +        /* If a setting already exists, change it to the new one */
> +        radix_tree_replace_slot(
> +            radix_tree_lookup_slot(
> +                &p2m->mem_access_settings, gfn),
> +            radix_tree_int_to_ptr(a));
> +}
> +


* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-18 15:39 ` Tamas K Lengyel
@ 2018-06-19  8:14   ` Alexandru Stefan ISAILA
  2018-06-19 14:22     ` Tamas K Lengyel
  0 siblings, 1 reply; 18+ messages in thread
From: Alexandru Stefan ISAILA @ 2018-06-19  8:14 UTC (permalink / raw)
  To: tamas; +Cc: george.dunlap, andrew.cooper3, jbeulich, rcojocaru, xen-devel

On Lu, 2018-06-18 at 09:39 -0600, Tamas K Lengyel wrote:
> On Mon, Jun 18, 2018 at 9:19 AM Alexandru Isaila
> <aisaila@bitdefender.com> wrote:
> >
> >
> > +static p2m_access_t p2m_get_access(struct p2m_domain *p2m,
> > unsigned long gfn)
> > +{
> > +    void *ptr;
> > +
> > +    ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn);
> > +    if ( !ptr )
> > +        return p2m_access_rwx;
> > +    else
> > +        return radix_tree_ptr_to_int(ptr);
> > +}
> > +
> > +static void p2m_set_access(struct p2m_domain *p2m, unsigned long
> > gfn,
> > +                                      p2m_access_t a)
> > +{
> > +    int rc;
> > +
> Shouldn't there be some locking around the radix tree operations
> here?
> If not, why not?
The lock is taken in p2m_set_mem_access(), so only one entry is set at a
time. The radix tree operations are similar to the ones on ARM, but if we
missed something I would appreciate help in correcting the issue.
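
To illustrate what I mean, the intended pattern is roughly this (sketch
only, not code from the tree; the wrapper name is made up):

/* Updates are meant to reach p2m_set_access() only under the p2m lock. */
static void set_access_locked(struct p2m_domain *p2m, unsigned long gfn,
                              p2m_access_t a)
{
    p2m_lock(p2m);               /* taken by p2m_set_mem_access() today */
    p2m_set_access(p2m, gfn, a); /* radix tree insert/delete happens here */
    p2m_unlock(p2m);
}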

Thanks,
Alex
>
> >
> > +    if ( p2m_access_rwx == a )
> > +        radix_tree_delete(&p2m->mem_access_settings, gfn);
> > +
> > +    rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
> > +                           radix_tree_int_to_ptr(a));
> > +    if ( rc == -EEXIST )
> > +        /* If a setting already exists, change it to the new one
> > */
> > +        radix_tree_replace_slot(
> > +            radix_tree_lookup_slot(
> > +                &p2m->mem_access_settings, gfn),
> > +            radix_tree_int_to_ptr(a));
> > +}
> > +

________________________
This email was scanned by Bitdefender
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-19  8:14   ` Alexandru Stefan ISAILA
@ 2018-06-19 14:22     ` Tamas K Lengyel
  0 siblings, 0 replies; 18+ messages in thread
From: Tamas K Lengyel @ 2018-06-19 14:22 UTC (permalink / raw)
  To: Alexandru Isaila
  Cc: George Dunlap, Andrew Cooper, Jan Beulich, Razvan Cojocaru, Xen-devel

On Tue, Jun 19, 2018 at 2:14 AM Alexandru Stefan ISAILA
<aisaila@bitdefender.com> wrote:
>
> On Lu, 2018-06-18 at 09:39 -0600, Tamas K Lengyel wrote:
> > On Mon, Jun 18, 2018 at 9:19 AM Alexandru Isaila
> > <aisaila@bitdefender.com> wrote:
> > >
> > >
> > > +static p2m_access_t p2m_get_access(struct p2m_domain *p2m,
> > > unsigned long gfn)
> > > +{
> > > +    void *ptr;
> > > +
> > > +    ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn);
> > > +    if ( !ptr )
> > > +        return p2m_access_rwx;
> > > +    else
> > > +        return radix_tree_ptr_to_int(ptr);
> > > +}
> > > +
> > > +static void p2m_set_access(struct p2m_domain *p2m, unsigned long
> > > gfn,
> > > +                                      p2m_access_t a)
> > > +{
> > > +    int rc;
> > > +
> > Shouldn't there be some locking around the radix tree operations
> > here?
> > If not, why not?
> The lock is in the p2m_set_mem_access() so that one entry is set at a
> time. The radix tree operations are similar to the ones on ARM but if
> we missed something I will appreciate the help in correcting the issue.

There are calls to this function that are not from p2m_set_mem_access
though, so I just wanted to make sure that each of those call sites
already has a lock in place. Perhaps a comment on this function should
make it clear that the caller has to perform its own locking, since the
function as-is doesn't do that. Or perhaps an ASSERT would be even
better, if possible?
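
E.g. something like this (just a sketch, and it assumes the
gfn_locked_by_me() helper from mm-locks.h is usable here):

static void p2m_set_access(struct p2m_domain *p2m, unsigned long gfn,
                           p2m_access_t a)
{
    /* The caller (e.g. p2m_set_mem_access()) must hold the p2m lock. */
    ASSERT(gfn_locked_by_me(p2m, gfn));

    /* ... radix tree update as in the patch ... */
}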

Thanks,
Tamas


* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-18 15:17 [PATCH v2] x86/mm: Add mem access rights to NPT Alexandru Isaila
  2018-06-18 15:39 ` Tamas K Lengyel
@ 2018-06-22 15:51 ` Jan Beulich
  2018-06-28 14:10   ` Alexandru Stefan ISAILA
  1 sibling, 1 reply; 18+ messages in thread
From: Jan Beulich @ 2018-06-22 15:51 UTC (permalink / raw)
  To: aisaila; +Cc: George Dunlap, Andrew Cooper, tamas, Razvan Cojocaru, xen-devel

>>> On 18.06.18 at 17:17, <aisaila@bitdefender.com> wrote:
> From: Isaila Alexandru <aisaila@bitdefender.com>
> 
> This patch adds access rights for the NPT pages. The access rights are
> saved in a radix tree with the root saved in p2m_domain.

Sounds resource intensive. How many nodes would such a radix tree have
on average?

> --- a/xen/arch/x86/mm/mem_access.c
> +++ b/xen/arch/x86/mm/mem_access.c
> @@ -221,6 +221,9 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
>          {
>              req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
>              req->u.mem_access.gla = gla;
> +        }
> +        if ( npfec.gla_valid || cpu_has_svm )
> +        {
>  
>              if ( npfec.kind == npfec_kind_with_gla )

You leave a bogusly placed blank line. Please put it ahead of the if()
you add.

> @@ -112,8 +117,37 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
>              flags |= _PAGE_PWT;
>              ASSERT(!level);
>          }
> -        return flags | P2M_BASE_FLAGS | _PAGE_PCD;
> +        flags |= P2M_BASE_FLAGS | _PAGE_PCD;
> +        break;
> +    }
> +    switch (access)

Coding style.

> +static void p2m_set_access(struct p2m_domain *p2m, unsigned long gfn,
> +                                      p2m_access_t a)
> +{
> +    int rc;
> +
> +    if ( p2m_access_rwx == a )
> +        radix_tree_delete(&p2m->mem_access_settings, gfn);
> +
> +    rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
> +                           radix_tree_int_to_ptr(a));

Is there an "else" missing above here? Otherwise why would you
delete the node first?

Also the access rights take far fewer bits than can be stored in a
node. Did you consider clustering several GFNs' access rights into a
single node?
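
For instance (purely illustrative - the constants, names and the 4-bit
width below are assumptions, not a worked-out proposal):

/* Pack 4 consecutive GFNs' settings, 4 bits each, into one tree node. */
#define ACCESS_CLUSTER_ORDER 2
#define ACCESS_BITS          4
#define ACCESS_MASK          ((1u << ACCESS_BITS) - 1)

static p2m_access_t p2m_get_access(struct p2m_domain *p2m, unsigned long gfn)
{
    void *ptr = radix_tree_lookup(&p2m->mem_access_settings,
                                  gfn >> ACCESS_CLUSTER_ORDER);
    unsigned int shift = (gfn & ((1ul << ACCESS_CLUSTER_ORDER) - 1)) *
                         ACCESS_BITS;

    if ( !ptr )
        return p2m_access_rwx;

    return (radix_tree_ptr_to_int(ptr) >> shift) & ACCESS_MASK;
}

The matching setter would of course have to read-modify-write the packed
value; whether that's worth it is part of the question.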

> @@ -750,7 +820,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_,
>       * XXX we will return p2m_invalid for unmapped gfns */
>      *t = p2m_mmio_dm;
>      /* Not implemented except with EPT */
> -    *a = p2m_access_rwx; 
> +    *a = p2m_access_n;

And the comment remains nevertheless?

> @@ -1127,6 +1200,7 @@ void p2m_pt_init(struct p2m_domain *p2m)
>      p2m->change_entry_type_global = p2m_pt_change_entry_type_global;
>      p2m->change_entry_type_range = p2m_pt_change_entry_type_range;
>      p2m->write_p2m_entry = paging_write_p2m_entry;
> +    radix_tree_init(&p2m->mem_access_settings);

While I don't think there's a risk of races, wouldn't it be better anyway
to set up resources before installing hooks / callbacks?

> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -675,6 +675,9 @@ void p2m_teardown(struct p2m_domain *p2m)
>  
>      d = p2m->domain;
>  
> +    if ( cpu_has_svm )
> +        radix_tree_destroy(&p2m->mem_access_settings, NULL);

What about shadow mode, or SVM without NPT? The init above is not
conditional upon SVM, nor is any of the manipulation of the radix tree.

> --- a/xen/include/asm-x86/mem_access.h
> +++ b/xen/include/asm-x86/mem_access.h
> @@ -46,7 +46,7 @@ bool p2m_mem_access_emulate_check(struct vcpu *v,
>  /* Sanity check for mem_access hardware support */
>  static inline bool p2m_mem_access_sanity_check(struct domain *d)
>  {
> -    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
> +    return is_hvm_domain(d) && hap_enabled(d);

Considering this, should initialization and manipulation perhaps become
conditional upon hap_enabled(d) (perhaps implicitly by finding the radix
tree uninitialized)? Or even some more specific predicate, so that the
tree wouldn't be maintained at all unless someone cared about the
access rights?
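
I.e. roughly (sketch only - the flag below doesn't exist, it merely stands
in for whatever predicate ends up being chosen):

static p2m_access_t p2m_get_access(struct p2m_domain *p2m, unsigned long gfn)
{
    void *ptr;

    /* Hypothetical flag: nobody has asked for access rights on this domain. */
    if ( !p2m->mem_access_in_use )
        return p2m_access_rwx;

    ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn);

    return ptr ? radix_tree_ptr_to_int(ptr) : p2m_access_rwx;
}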

Jan




* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-22 15:51 ` Jan Beulich
@ 2018-06-28 14:10   ` Alexandru Stefan ISAILA
  2018-06-28 14:40     ` Jan Beulich
  0 siblings, 1 reply; 18+ messages in thread
From: Alexandru Stefan ISAILA @ 2018-06-28 14:10 UTC (permalink / raw)
  To: JBeulich; +Cc: George.Dunlap, andrew.cooper3, tamas, rcojocaru, xen-devel

On Vi, 2018-06-22 at 09:51 -0600, Jan Beulich wrote:
> >
> > >
> > > >
> > > > On 18.06.18 at 17:17, <aisaila@bitdefender.com> wrote:
> > From: Isaila Alexandru <aisaila@bitdefender.com>
> >
> > This patch adds access rights for the NPT pages. The access rights
> > are
> > saved in a radix tree with the root saved in p2m_domain.
> Sounds resource intensive. How many nodes would such a radix tree
> have
> on average?

The average is around 1478890 for a machine with 4GB of RAM.
>
> >
> > --- a/xen/arch/x86/mm/mem_access.c
> > +++ b/xen/arch/x86/mm/mem_access.c
> > @@ -221,6 +221,9 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned
> > long gla,
> >          {
> >              req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
> >              req->u.mem_access.gla = gla;
> > +        }
> > +        if ( npfec.gla_valid || cpu_has_svm )
> > +        {
> >
> >              if ( npfec.kind == npfec_kind_with_gla )
> You leave a bogusly placed blank line. Please put it ahead of the
> if()
> you add.
>
> >
> > @@ -112,8 +117,37 @@ static unsigned long p2m_type_to_flags(const
> > struct p2m_domain *p2m,
> >              flags |= _PAGE_PWT;
> >              ASSERT(!level);
> >          }
> > -        return flags | P2M_BASE_FLAGS | _PAGE_PCD;
> > +        flags |= P2M_BASE_FLAGS | _PAGE_PCD;
> > +        break;
> > +    }
> > +    switch (access)
> Coding style.
>
> >
> > +static void p2m_set_access(struct p2m_domain *p2m, unsigned long
> > gfn,
> > +                                      p2m_access_t a)
> > +{
> > +    int rc;
> > +
> > +    if ( p2m_access_rwx == a )
> > +        radix_tree_delete(&p2m->mem_access_settings, gfn);
> > +
> > +    rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
> > +                           radix_tree_int_to_ptr(a));
> Is there an "else" missing above here? Otherwise why would you
> delete the node first?

Yes, it needs an else or a return. We plan not to keep rwx entries in
the tree, so we can save some space.
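
I.e. roughly this shape (sketch, not the final version):

static void p2m_set_access(struct p2m_domain *p2m, unsigned long gfn,
                           p2m_access_t a)
{
    if ( a == p2m_access_rwx )
        /* rwx is the default, so no node is kept for it. */
        radix_tree_delete(&p2m->mem_access_settings, gfn);
    else if ( radix_tree_insert(&p2m->mem_access_settings, gfn,
                                radix_tree_int_to_ptr(a)) == -EEXIST )
        /* A setting already exists; replace it with the new one. */
        radix_tree_replace_slot(
            radix_tree_lookup_slot(&p2m->mem_access_settings, gfn),
            radix_tree_int_to_ptr(a));
}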

Alex


* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-28 14:10   ` Alexandru Stefan ISAILA
@ 2018-06-28 14:40     ` Jan Beulich
  2018-06-28 14:53       ` Alexandru Stefan ISAILA
  2018-06-28 16:29       ` Tamas K Lengyel
  0 siblings, 2 replies; 18+ messages in thread
From: Jan Beulich @ 2018-06-28 14:40 UTC (permalink / raw)
  To: aisaila; +Cc: George Dunlap, Andrew Cooper, tamas, Razvan Cojocaru, xen-devel

>>> On 28.06.18 at 16:10, <aisaila@bitdefender.com> wrote:
> On Vi, 2018-06-22 at 09:51 -0600, Jan Beulich wrote:
>> > > > On 18.06.18 at 17:17, <aisaila@bitdefender.com> wrote:
>> > From: Isaila Alexandru <aisaila@bitdefender.com>
>> >
>> > This patch adds access rights for the NPT pages. The access rights
>> > are
>> > saved in a radix tree with the root saved in p2m_domain.
>> Sounds resource intensive. How many nodes would such a radix tree
>> have
>> on average?
> 
> The average is around 1478890 for a machine with 4GB of ram.

Is this with ...

>> > +static void p2m_set_access(struct p2m_domain *p2m, unsigned long
>> > gfn,
>> > +                                      p2m_access_t a)
>> > +{
>> > +    int rc;
>> > +
>> > +    if ( p2m_access_rwx == a )
>> > +        radix_tree_delete(&p2m->mem_access_settings, gfn);
>> > +
>> > +    rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
>> > +                           radix_tree_int_to_ptr(a));
>> Is there an "else" missing above here? Otherwise why would you
>> delete the node first?
> 
> Yes it needs a else or a return. We plan not to have the rwx in the
> tree so we ca save up some space.

... this corrected? Otherwise I'm tempted to say that the creation of
this radix tree needs to be avoided by all means, as long as it's not
really needed.

Jan




* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-28 14:40     ` Jan Beulich
@ 2018-06-28 14:53       ` Alexandru Stefan ISAILA
  2018-06-28 14:58         ` Razvan Cojocaru
  2018-06-29  6:13         ` Jan Beulich
  2018-06-28 16:29       ` Tamas K Lengyel
  1 sibling, 2 replies; 18+ messages in thread
From: Alexandru Stefan ISAILA @ 2018-06-28 14:53 UTC (permalink / raw)
  To: JBeulich; +Cc: George.Dunlap, andrew.cooper3, tamas, rcojocaru, xen-devel

On Jo, 2018-06-28 at 08:40 -0600, Jan Beulich wrote:
> >
> > >
> > > >
> > > > On 28.06.18 at 16:10, <aisaila@bitdefender.com> wrote:
> > On Vi, 2018-06-22 at 09:51 -0600, Jan Beulich wrote:
> > >
> > > >
> > > > >
> > > > > >
> > > > > > On 18.06.18 at 17:17, <aisaila@bitdefender.com> wrote:
> > > > From: Isaila Alexandru <aisaila@bitdefender.com>
> > > >
> > > > This patch adds access rights for the NPT pages. The access
> > > > rights
> > > > are
> > > > saved in a radix tree with the root saved in p2m_domain.
> > > Sounds resource intensive. How many nodes would such a radix tree
> > > have
> > > on average?
> > The average is around 1478890 for a machine with 4GB of ram.
> Is this with ...
>
> >
> > >
> > > >
> > > > +static void p2m_set_access(struct p2m_domain *p2m, unsigned
> > > > long
> > > > gfn,
> > > > +                                      p2m_access_t a)
> > > > +{
> > > > +    int rc;
> > > > +
> > > > +    if ( p2m_access_rwx == a )
> > > > +        radix_tree_delete(&p2m->mem_access_settings, gfn);
> > > > +
> > > > +    rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
> > > > +                           radix_tree_int_to_ptr(a));
> > > Is there an "else" missing above here? Otherwise why would you
> > > delete the node first?
> > Yes it needs a else or a return. We plan not to have the rwx in the
> > tree so we ca save up some space.
> ... this corrected? Otherwise I'm tempted to say that the creation of
> this radix tree needs to be avoided by all means, as long as it's not
> really needed.

The number was measured with this patch as posted, so no correction was
applied. It was obtained with xen-access write, and I don't think it will
make a difference if you change the access on all of the memory pages.

Alex


* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-28 14:53       ` Alexandru Stefan ISAILA
@ 2018-06-28 14:58         ` Razvan Cojocaru
  2018-06-29  6:17           ` Jan Beulich
  2018-06-29  6:13         ` Jan Beulich
  1 sibling, 1 reply; 18+ messages in thread
From: Razvan Cojocaru @ 2018-06-28 14:58 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA, JBeulich
  Cc: George.Dunlap, andrew.cooper3, tamas, xen-devel

On 06/28/2018 05:53 PM, Alexandru Stefan ISAILA wrote:
> On Jo, 2018-06-28 at 08:40 -0600, Jan Beulich wrote:
>>>
>>>>
>>>>>
>>>>> On 28.06.18 at 16:10, <aisaila@bitdefender.com> wrote:
>>> On Vi, 2018-06-22 at 09:51 -0600, Jan Beulich wrote:
>>>>
>>>>>
>>>>>>
>>>>>>>
>>>>>>> On 18.06.18 at 17:17, <aisaila@bitdefender.com> wrote:
>>>>> From: Isaila Alexandru <aisaila@bitdefender.com>
>>>>>
>>>>> This patch adds access rights for the NPT pages. The access
>>>>> rights
>>>>> are
>>>>> saved in a radix tree with the root saved in p2m_domain.
>>>> Sounds resource intensive. How many nodes would such a radix tree
>>>> have
>>>> on average?
>>> The average is around 1478890 for a machine with 4GB of ram.
>> Is this with ...
>>
>>>
>>>>
>>>>>
>>>>> +static void p2m_set_access(struct p2m_domain *p2m, unsigned
>>>>> long
>>>>> gfn,
>>>>> +                                      p2m_access_t a)
>>>>> +{
>>>>> +    int rc;
>>>>> +
>>>>> +    if ( p2m_access_rwx == a )
>>>>> +        radix_tree_delete(&p2m->mem_access_settings, gfn);
>>>>> +
>>>>> +    rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
>>>>> +                           radix_tree_int_to_ptr(a));
>>>> Is there an "else" missing above here? Otherwise why would you
>>>> delete the node first?
>>> Yes it needs a else or a return. We plan not to have the rwx in the
>>> tree so we ca save up some space.
>> ... this corrected? Otherwise I'm tempted to say that the creation of
>> this radix tree needs to be avoided by all means, as long as it's not
>> really needed.
> 
> The number was with this patch so no correction. This was done with
> xen-access write and I don't think it will make a difference if you
> change the access to all the mem pages.

Right, so the average is taken across runs of "xen-access <DOMID>
write", which sets all of the domain's pages to r-x. A typical
introspection application will not set all of the domain's pages up this
way - so the number is really an average of maximums.

AFAIK ARM uses this model - are there fewer pages in a typical ARM
guest, or are we missing something about the way they are using radix
trees for this purpose? Or perhaps the ARM code should be changed as well?


Thanks,
Razvan


* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-28 14:40     ` Jan Beulich
  2018-06-28 14:53       ` Alexandru Stefan ISAILA
@ 2018-06-28 16:29       ` Tamas K Lengyel
  1 sibling, 0 replies; 18+ messages in thread
From: Tamas K Lengyel @ 2018-06-28 16:29 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Alexandru Isaila, Andrew Cooper, George Dunlap, Razvan Cojocaru,
	Xen-devel

On Thu, Jun 28, 2018 at 8:40 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 28.06.18 at 16:10, <aisaila@bitdefender.com> wrote:
> > On Vi, 2018-06-22 at 09:51 -0600, Jan Beulich wrote:
> >> > > > On 18.06.18 at 17:17, <aisaila@bitdefender.com> wrote:
> >> > From: Isaila Alexandru <aisaila@bitdefender.com>
> >> >
> >> > This patch adds access rights for the NPT pages. The access rights
> >> > are
> >> > saved in a radix tree with the root saved in p2m_domain.
> >> Sounds resource intensive. How many nodes would such a radix tree
> >> have
> >> on average?
> >
> > The average is around 1478890 for a machine with 4GB of ram.
>
> Is this with ...
>
> >> > +static void p2m_set_access(struct p2m_domain *p2m, unsigned long
> >> > gfn,
> >> > +                                      p2m_access_t a)
> >> > +{
> >> > +    int rc;
> >> > +
> >> > +    if ( p2m_access_rwx == a )
> >> > +        radix_tree_delete(&p2m->mem_access_settings, gfn);
> >> > +
> >> > +    rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
> >> > +                           radix_tree_int_to_ptr(a));
> >> Is there an "else" missing above here? Otherwise why would you
> >> delete the node first?
> >
> > Yes it needs a else or a return. We plan not to have the rwx in the
> > tree so we ca save up some space.
>
> ... this corrected? Otherwise I'm tempted to say that the creation of
> this radix tree needs to be avoided by all means, as long as it's not
> really needed.

Having the radix tree created and present doesn't add much overhead. A
lookup on an empty radix tree effectively just boils down to a NULL
check. So yes, when the page is rwx, there should be no setting in the
tree. Otherwise performance is going to decrease with the number of
pages having custom settings, but since under "normal" use this tree
won't have more than a couple hundred nodes at most, this shouldn't
really be of concern. If performance is of the essence, optimizing how
many pages are monitored with mem_access is ultimately up to the user.

Tamas


* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-28 14:53       ` Alexandru Stefan ISAILA
  2018-06-28 14:58         ` Razvan Cojocaru
@ 2018-06-29  6:13         ` Jan Beulich
  2018-06-29  8:42           ` Alexandru Stefan ISAILA
  1 sibling, 1 reply; 18+ messages in thread
From: Jan Beulich @ 2018-06-29  6:13 UTC (permalink / raw)
  To: aisaila; +Cc: George.Dunlap, andrew.cooper3, tamas, rcojocaru, xen-devel

>>> Alexandru Stefan ISAILA <aisaila@bitdefender.com> 06/28/18 4:53 PM >>>
>On Jo, 2018-06-28 at 08:40 -0600, Jan Beulich wrote:
>> > > > On 28.06.18 at 16:10, <aisaila@bitdefender.com> wrote:
>> > On Vi, 2018-06-22 at 09:51 -0600, Jan Beulich wrote:
>> > > > > > On 18.06.18 at 17:17, <aisaila@bitdefender.com> wrote:
>> > > > From: Isaila Alexandru <aisaila@bitdefender.com>
>> > > >
>> > > > This patch adds access rights for the NPT pages. The access
>> > > > rights
>> > > > are
>> > > > saved in a radix tree with the root saved in p2m_domain.
>> > > Sounds resource intensive. How many nodes would such a radix tree
>> > > have
>> > > on average?
>> > The average is around 1478890 for a machine with 4GB of ram.
>> Is this with ...
>>
>> > > > +static void p2m_set_access(struct p2m_domain *p2m, unsigned
>> > > > long
>> > > > gfn,
>> > > > +                                      p2m_access_t a)
>> > > > +{
>> > > > +    int rc;
>> > > > +
>> > > > +    if ( p2m_access_rwx == a )
>> > > > +        radix_tree_delete(&p2m->mem_access_settings, gfn);
>> > > > +
>> > > > +    rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
>> > > > +                           radix_tree_int_to_ptr(a));
>> > > Is there an "else" missing above here? Otherwise why would you
>> > > delete the node first?
>> > Yes it needs a else or a return. We plan not to have the rwx in the
>> > tree so we ca save up some space.
>> ... this corrected? Otherwise I'm tempted to say that the creation of
>> this radix tree needs to be avoided by all means, as long as it's not
>> really needed.
>
>The number was with this patch so no correction. This was done with
>xen-access write and I don't think it will make a difference if you
>change the access to all the mem pages.

The "on average" question wasn't about the case with xen-access in use. I'm
worried about the overhead _without_ any introspection active.

Jan




* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-28 14:58         ` Razvan Cojocaru
@ 2018-06-29  6:17           ` Jan Beulich
  0 siblings, 0 replies; 18+ messages in thread
From: Jan Beulich @ 2018-06-29  6:17 UTC (permalink / raw)
  To: aisaila, rcojocaru; +Cc: George.Dunlap, andrew.cooper3, tamas, xen-devel

>>> Razvan Cojocaru <rcojocaru@bitdefender.com> 06/28/18 4:58 PM >>>
>On 06/28/2018 05:53 PM, Alexandru Stefan ISAILA wrote:
>> The number was with this patch so no correction. This was done with
>> xen-access write and I don't think it will make a difference if you
>> change the access to all the mem pages.
>
>Right, so the average is the average between runs of "xen-access <DOMID>
>write", which sets all of the domain's pages to r-x. A typical
>introspection application will not set all of the domain's pages up this
>way - so the number is an average of maximums.
>
>AFAIK ARM uses this model - are there fewer pages in a typical ARM
>guest, or are we missing something about the way they are using radix
>trees for this purpose? Or perhaps the ARM code should be changed as well?

Whether the overhead with introspection in use is tolerable is something
you guys need to know. As said in the other reply - my worries are about
extra overhead without introspection in use. If the radix tree is never
used in such a case, avoiding its creation at domain creation time would
make it more clearly visible that there is no overhead.


Jan



* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-29  6:13         ` Jan Beulich
@ 2018-06-29  8:42           ` Alexandru Stefan ISAILA
  2018-06-29  9:38             ` Jan Beulich
  0 siblings, 1 reply; 18+ messages in thread
From: Alexandru Stefan ISAILA @ 2018-06-29  8:42 UTC (permalink / raw)
  To: jbeulich; +Cc: George.Dunlap, andrew.cooper3, tamas, rcojocaru, xen-devel

On Vi, 2018-06-29 at 00:13 -0600, Jan Beulich wrote:
> >
> > >
> > > >
> > > > Alexandru Stefan ISAILA <aisaila@bitdefender.com> 06/28/18 4:53
> > > > PM >>>
> > On Jo, 2018-06-28 at 08:40 -0600, Jan Beulich wrote:
> > >
> > > >
> > > > >
> > > > > >
> > > > > > On 28.06.18 at 16:10, <aisaila@bitdefender.com> wrote:
> > > > On Vi, 2018-06-22 at 09:51 -0600, Jan Beulich wrote:
> > > > >
> > > > > >
> > > > > > >
> > > > > > > >
> > > > > > > > On 18.06.18 at 17:17, <aisaila@bitdefender.com> wrote:
> > > > > > From: Isaila Alexandru <aisaila@bitdefender.com>
> > > > > >
> > > > > > This patch adds access rights for the NPT pages. The access
> > > > > > rights
> > > > > > are
> > > > > > saved in a radix tree with the root saved in p2m_domain.
> > > > > Sounds resource intensive. How many nodes would such a radix
> > > > > tree
> > > > > have
> > > > > on average?
> > > > The average is around 1478890 for a machine with 4GB of ram.
> > > Is this with ...
> > >
> > > >
> > > > >
> > > > > >
> > > > > > +static void p2m_set_access(struct p2m_domain *p2m,
> > > > > > unsigned
> > > > > > long
> > > > > > gfn,
> > > > > > +                                      p2m_access_t a)
> > > > > > +{
> > > > > > +    int rc;
> > > > > > +
> > > > > > +    if ( p2m_access_rwx == a )
> > > > > > +        radix_tree_delete(&p2m->mem_access_settings, gfn);
> > > > > > +
> > > > > > +    rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
> > > > > > +                           radix_tree_int_to_ptr(a));
> > > > > Is there an "else" missing above here? Otherwise why would
> > > > > you
> > > > > delete the node first?
> > > > Yes it needs a else or a return. We plan not to have the rwx in
> > > > the
> > > > tree so we ca save up some space.
> > > ... this corrected? Otherwise I'm tempted to say that the
> > > creation of
> > > this radix tree needs to be avoided by all means, as long as it's
> > > not
> > > really needed.
> > The number was with this patch so no correction. This was done with
> > xen-access write and I don't think it will make a difference if you
> > change the access to all the mem pages.
> The question "on average" wasn't with xen-access in use. I'm worried
> of the
> overhead _without_ any introspection active.

I've started a Windows 7 machine with no introspection or xen-access.
After 30 minutes of uptime there were 0 inserts into the tree. I guess
the overhead is down to a minimum with no user-modified access rights.

Alex


* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-29  8:42           ` Alexandru Stefan ISAILA
@ 2018-06-29  9:38             ` Jan Beulich
  2018-06-29 16:42               ` Tamas K Lengyel
  0 siblings, 1 reply; 18+ messages in thread
From: Jan Beulich @ 2018-06-29  9:38 UTC (permalink / raw)
  To: aisaila; +Cc: George Dunlap, Andrew Cooper, tamas, Razvan Cojocaru, xen-devel

>>> On 29.06.18 at 10:42, <aisaila@bitdefender.com> wrote:
> I've started a Windows 7 machine with no introspection or xen-access.
> After 30 minutes of uptime there were 0 inserts into the tree. I guess
> the overhead is down to a minimum with no user-modified access rights.

Thanks, this is helpful to know. In that case though, as said before, I'd
like to ask to defer setting up of the radix tree to the point where it
actually is going to be needed.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-29  9:38             ` Jan Beulich
@ 2018-06-29 16:42               ` Tamas K Lengyel
  2018-07-02  6:37                 ` Jan Beulich
  0 siblings, 1 reply; 18+ messages in thread
From: Tamas K Lengyel @ 2018-06-29 16:42 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Alexandru Isaila, Andrew Cooper, George Dunlap, Razvan Cojocaru,
	Xen-devel

On Fri, Jun 29, 2018 at 3:38 AM Jan Beulich <JBeulich@suse.com> wrote:
>
> >>> On 29.06.18 at 10:42, <aisaila@bitdefender.com> wrote:
> > I've started a Windows 7 machine with no introspection or xen-access.
> > After 30 minutes of uptime there were 0 inserts into the tree. I guess
> > the overhead is down to a minimum with no user-modified access rights.
>
> Thanks, this is helpful to know. In that case though, as said before, I'd
> like to ask to defer setting up of the radix tree to the point where it
> actually is going to be needed.

Are you worried about the radix tree being present (i.e. radix_tree_init
having been called) before it is used for mem_access? As I said earlier,
a lookup on an empty radix tree is equivalent to a NULL check. I don't
get how not having the tree initialized will be any faster than doing
a lookup on an empty one.
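
To make that concrete, the fast path I'm referring to has roughly the
following shape (illustrative only, not the actual Xen radix tree code;
the "rnode" field and walk_to_leaf() helper are placeholder names):

void *radix_tree_lookup_sketch(struct radix_tree_root *root,
                               unsigned long index)
{
    struct radix_tree_node *node = root->rnode;  /* placeholder field name */

    if ( node == NULL )      /* empty tree: nothing but a pointer check */
        return NULL;

    /* Otherwise descend the tree levels down to the leaf slot. */
    return walk_to_leaf(node, index);            /* hypothetical helper */
}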

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-06-29 16:42               ` Tamas K Lengyel
@ 2018-07-02  6:37                 ` Jan Beulich
  2018-07-02  8:01                   ` Alexandru Stefan ISAILA
  0 siblings, 1 reply; 18+ messages in thread
From: Jan Beulich @ 2018-07-02  6:37 UTC (permalink / raw)
  To: tamas; +Cc: aisaila, Andrew Cooper, George Dunlap, Razvan Cojocaru, Xen-devel

>>> On 29.06.18 at 18:42, <tamas@tklengyel.com> wrote:
> On Fri, Jun 29, 2018 at 3:38 AM Jan Beulich <JBeulich@suse.com> wrote:
>>
>> >>> On 29.06.18 at 10:42, <aisaila@bitdefender.com> wrote:
>> > I've started a Windows 7 machine with no introspection or xen-access.
>> > After 30 minutes of uptime there were 0 inserts into the tree. I guess
>> > the overhead is down to a minimum with no user-modified access rights.
>>
>> Thanks, this is helpful to know. In that case though, as said before, I'd
>> like to ask to defer setting up of the radix tree to the point where it
>> actually is going to be needed.
> 
> Are you worried about the radix tree being present (i.e. radix_tree_init
> having been called) before it is used for mem_access? As I said earlier,
> a lookup on an empty radix tree is equivalent to a NULL check. I don't
> get how not having the tree initialized will be any faster than doing
> a lookup on an empty one.

No, the question is not about performance. The point is about
reassurance that the tree isn't going to be used in normal (non-
introspection) operation. If it suddenly and unknowingly became
used down the road, the resource consumption pattern of
domains may change quite significantly. Not setting up the tree
unless needed likely also helps review of the changes, as it'll be
necessary to make sure in the patch that it won't get accessed
without having been set up.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-07-02  6:37                 ` Jan Beulich
@ 2018-07-02  8:01                   ` Alexandru Stefan ISAILA
  2018-07-02  8:20                     ` Jan Beulich
  0 siblings, 1 reply; 18+ messages in thread
From: Alexandru Stefan ISAILA @ 2018-07-02  8:01 UTC (permalink / raw)
  To: JBeulich, tamas; +Cc: George.Dunlap, andrew.cooper3, rcojocaru, xen-devel

On Lu, 2018-07-02 at 00:37 -0600, Jan Beulich wrote:
> >>> On 29.06.18 at 18:42, <tamas@tklengyel.com> wrote:
> > On Fri, Jun 29, 2018 at 3:38 AM Jan Beulich <JBeulich@suse.com> wrote:
> > > >>> On 29.06.18 at 10:42, <aisaila@bitdefender.com> wrote:
> > > > I've started a Windows 7 machine with no introspection or xen-access.
> > > > After 30 minutes of uptime there were 0 inserts into the tree. I guess
> > > > the overhead is down to a minimum with no user-modified access rights.
> > > Thanks, this is helpful to know. In that case though, as said before,
> > > I'd like to ask to defer setting up of the radix tree to the point
> > > where it actually is going to be needed.
> > Are you worried about the radix tree being present (i.e.
> > radix_tree_init having been called) before it is used for mem_access?
> > As I said earlier, a lookup on an empty radix tree is equivalent to a
> > NULL check. I don't get how not having the tree initialized will be
> > any faster than doing a lookup on an empty one.
> No, the question is not about performance. The point is about
> reassurance that the tree isn't going to be used in normal (non-
> introspection) operation. If it suddenly and unknowingly became
> used down the road, the resource consumption pattern of
> domains may change quite significantly. Not setting up the tree
> unless needed likely also helps review of the changes, as it'll be
> necessary to make sure in the patch that it won't get accessed
> without having been set up.
>
I can move the radix tree init to p2m_pt_set_entry() so that it is done
on first use, or I can move it to vm_event_enable() and then return early
in every case where the root is NULL.
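
For concreteness, a rough sketch of the second option, assuming the root
becomes a pointer that stays NULL until vm_event_enable() allocates it and
initialises it with radix_tree_init() (the names and the default_access
fallback are my assumptions, not final code):

static p2m_access_t p2m_get_access(struct p2m_domain *p2m, unsigned long gfn)
{
    void *ptr;

    if ( !p2m->mem_access_settings )     /* tree never set up: use default */
        return p2m->default_access;

    ptr = radix_tree_lookup(p2m->mem_access_settings, gfn);

    return ptr ? radix_tree_ptr_to_int(ptr) : p2m->default_access;
}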

What would be the best way to handle this?

Thanks,
Alex

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH v2] x86/mm: Add mem access rights to NPT
  2018-07-02  8:01                   ` Alexandru Stefan ISAILA
@ 2018-07-02  8:20                     ` Jan Beulich
  0 siblings, 0 replies; 18+ messages in thread
From: Jan Beulich @ 2018-07-02  8:20 UTC (permalink / raw)
  To: aisaila; +Cc: George Dunlap, Andrew Cooper, tamas, Razvan Cojocaru, xen-devel

>>> On 02.07.18 at 10:01, <aisaila@bitdefender.com> wrote:
> On Lu, 2018-07-02 at 00:37 -0600, Jan Beulich wrote:
>> >>> On 29.06.18 at 18:42, <tamas@tklengyel.com> wrote:
>> > On Fri, Jun 29, 2018 at 3:38 AM Jan Beulich <JBeulich@suse.com> wrote:
>> > > >>> On 29.06.18 at 10:42, <aisaila@bitdefender.com> wrote:
>> > > > I've started a Windows 7 machine with no introspection or xen-access.
>> > > > After 30 minutes of uptime there were 0 inserts into the tree. I guess
>> > > > the overhead is down to a minimum with no user-modified access rights.
>> > > Thanks, this is helpful to know. In that case though, as said before,
>> > > I'd like to ask to defer setting up of the radix tree to the point
>> > > where it actually is going to be needed.
>> > Are you worried about the radix tree being present (i.e.
>> > radix_tree_init having been called) before it is used for mem_access?
>> > As I said earlier, a lookup on an empty radix tree is equivalent to a
>> > NULL check. I don't get how not having the tree initialized will be
>> > any faster than doing a lookup on an empty one.
>> No, the question is not about performance. The point is about
>> reassurance that the tree isn't going to be used in normal (non-
>> introspection) operation. If it suddenly and unknowingly became
>> used down the road, the resource consumption pattern of
>> domains may change quite significantly. Not setting up the tree
>> unless needed likely also helps review of the changes, as it'll be
>> necessary to make sure in the patch that it won't get accessed
>> without having been set up.
>>
> I can move the radix tree init to p2m_pt_set_entry() so that it is done
> on first use, or I can move it to vm_event_enable() and then return early
> in every case where the root is NULL.
> 
> What would be the best way to handle this?

I'd prefer the second of the named options.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 18+ messages in thread

end of thread, other threads:[~2018-07-02  8:20 UTC | newest]

Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-06-18 15:17 [PATCH v2] x86/mm: Add mem access rights to NPT Alexandru Isaila
2018-06-18 15:39 ` Tamas K Lengyel
2018-06-19  8:14   ` Alexandru Stefan ISAILA
2018-06-19 14:22     ` Tamas K Lengyel
2018-06-22 15:51 ` Jan Beulich
2018-06-28 14:10   ` Alexandru Stefan ISAILA
2018-06-28 14:40     ` Jan Beulich
2018-06-28 14:53       ` Alexandru Stefan ISAILA
2018-06-28 14:58         ` Razvan Cojocaru
2018-06-29  6:17           ` Jan Beulich
2018-06-29  6:13         ` Jan Beulich
2018-06-29  8:42           ` Alexandru Stefan ISAILA
2018-06-29  9:38             ` Jan Beulich
2018-06-29 16:42               ` Tamas K Lengyel
2018-07-02  6:37                 ` Jan Beulich
2018-07-02  8:01                   ` Alexandru Stefan ISAILA
2018-07-02  8:20                     ` Jan Beulich
2018-06-28 16:29       ` Tamas K Lengyel
