* [Xen-devel] [PATCH v2] x86/mm: Add mem access rights to NPT
From: Alexandru Stefan ISAILA @ 2019-08-22 14:02 UTC (permalink / raw)
  To: xen-devel
  Cc: Petre Ovidiu PIRCALABU, kevin.tian, tamas, wl, rcojocaru,
	george.dunlap, andrew.cooper3, George Dunlap,
	Alexandru Stefan ISAILA, jbeulich, jun.nakajima, roger.pau

This patch adds access control for NPT mode.

The access rights are stored in bits 56:53 of the NPT p2m table entries.
The bits are free after clearing the IOMMU flags [1].

Modify p2m_type_to_flags() to accept and interpret an access value,
parallel to the ept code.

Add a set_default_access() method to the p2m-pt and p2m-ept versions
of the p2m rather than setting it directly, to deal with different
default permitted access values.

[1] 854a49a7486a02edae5b3e53617bace526e9c1b1

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>

---
Note: Tested with xen-access write/exec

Changes since V1:
	- Remove radix tree and store access bits in NPT p2m table.
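
Note on the encoding (an illustrative sketch; this assumes the usual
get_pte_flags()/put_pte_flags() convention of storing flag bits 23:12
in pte bits 63:52, and p2m_access_rx having enum value 5):

    /* _PAGE_ACCESS_BITS = 0x1e000: flag bits 16:13 <-> pte bits 56:53 */
    unsigned int fl = MASK_INSR(p2m_access_rx, _PAGE_ACCESS_BITS); /* 0xa000 */
    intpte_t access = put_pte_flags(fl);    /* sets pte bits 55 and 53 */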
---
 xen/arch/x86/mm/mem_access.c |   7 +--
 xen/arch/x86/mm/p2m-ept.c    |   9 +++
 xen/arch/x86/mm/p2m-pt.c     | 111 ++++++++++++++++++++++++++++++-----
 xen/include/asm-x86/p2m.h    |   2 +
 4 files changed, 109 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 0144f92b98..d8cc7a50de 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -374,10 +374,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
 
     /* If request to set default access. */
     if ( gfn_eq(gfn, INVALID_GFN) )
-    {
-        p2m->default_access = a;
-        return 0;
-    }
+        return p2m->set_default_access(p2m, a);
 
     p2m_lock(p2m);
     if ( ap2m )
@@ -520,7 +517,7 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
 
 bool p2m_mem_access_sanity_check(const struct domain *d)
 {
-    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
+    return is_hvm_domain(d) && hap_enabled(d);
 }
 
 /*
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 6b8468c793..a6f2347bc1 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -624,6 +624,14 @@ bool_t ept_handle_misconfig(uint64_t gpa)
     return spurious ? (rc >= 0) : (rc > 0);
 }
 
+int ept_set_default_access(struct p2m_domain *p2m, p2m_access_t p2ma)
+{
+    /* EPT is able to represent every p2m_access_t value, so no check is needed. */
+    p2m->default_access = p2ma;
+
+    return 0;
+}
+
 /*
  * ept_set_entry() computes 'need_modify_vtd_table' for itself,
  * by observing whether any gfn->mfn translations are modified.
@@ -1222,6 +1230,7 @@ int ept_p2m_init(struct p2m_domain *p2m)
     p2m->change_entry_type_global = ept_change_entry_type_global;
     p2m->change_entry_type_range = ept_change_entry_type_range;
     p2m->memory_type_changed = ept_memory_type_changed;
+    p2m->set_default_access = ept_set_default_access;
     p2m->audit_p2m = NULL;
     p2m->tlb_flush = ept_tlb_flush;
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 3a0a500d66..7f79eac979 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -63,10 +63,13 @@
 #define needs_recalc(level, ent) _needs_recalc(level##e_get_flags(ent))
 #define valid_recalc(level, ent) (!(level##e_get_flags(ent) & _PAGE_ACCESSED))
 
+#define _PAGE_ACCESS_BITS  (0x1e000) /* mem_access rights 16:13 */
+
 static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
                                        p2m_type_t t,
                                        mfn_t mfn,
-                                       unsigned int level)
+                                       unsigned int level,
+                                       p2m_access_t access)
 {
     unsigned long flags = (unsigned long)(t & 0x7f) << 12;
 
@@ -79,23 +82,28 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
     case p2m_ram_paged:
     case p2m_ram_paging_in:
     default:
-        return flags | _PAGE_NX_BIT;
+        flags |= _PAGE_NX_BIT;
+        break;
     case p2m_grant_map_ro:
-        return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
+        flags |= P2M_BASE_FLAGS | _PAGE_NX_BIT;
+        break;
     case p2m_ioreq_server:
         flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
         if ( p2m->ioreq.flags & XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
-            return flags & ~_PAGE_RW;
-        return flags;
+            flags &= ~_PAGE_RW;
+        break;
     case p2m_ram_ro:
     case p2m_ram_logdirty:
     case p2m_ram_shared:
-        return flags | P2M_BASE_FLAGS;
+        flags |= P2M_BASE_FLAGS;
+        break;
     case p2m_ram_rw:
-        return flags | P2M_BASE_FLAGS | _PAGE_RW;
+        flags |= P2M_BASE_FLAGS | _PAGE_RW;
+        break;
     case p2m_grant_map_rw:
     case p2m_map_foreign:
-        return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
+        flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
+        break;
     case p2m_mmio_direct:
         if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
             flags |= _PAGE_RW;
@@ -104,8 +112,32 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
             flags |= _PAGE_PWT;
             ASSERT(!level);
         }
-        return flags | P2M_BASE_FLAGS | _PAGE_PCD;
+        flags |= P2M_BASE_FLAGS | _PAGE_PCD;
+        break;
     }
+
+    switch ( access )
+    {
+    case p2m_access_r:
+        flags |= _PAGE_NX_BIT;
+        flags &= ~_PAGE_RW;
+        break;
+    case p2m_access_rw:
+        flags |= _PAGE_NX_BIT;
+        break;
+    case p2m_access_rx:
+    case p2m_access_rx2rw:
+        flags &= ~(_PAGE_NX_BIT | _PAGE_RW);
+        break;
+    case p2m_access_x:
+        flags &= ~_PAGE_RW;
+        break;
+    case p2m_access_rwx:
+    default:
+        break;
+    }
+
+    return flags;
 }
 
 
@@ -152,6 +184,17 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
     p2m_free_ptp(p2m, l1e_get_page(*p2m_entry));
 }
 
+static void p2m_set_access(intpte_t *entry, p2m_access_t access)
+{
+    *entry &= ~put_pte_flags(_PAGE_ACCESS_BITS);
+    *entry |= put_pte_flags(MASK_INSR(access, _PAGE_ACCESS_BITS));
+}
+
+static p2m_access_t p2m_get_access(intpte_t entry)
+{
+    return (p2m_access_t)(MASK_EXTR(get_pte_flags(entry), _PAGE_ACCESS_BITS));
+}
+
 // Walk one level of the P2M table, allocating a new table if required.
 // Returns 0 on error.
 //
@@ -226,6 +269,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         {
             new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
                                      flags);
+            p2m_set_access(&new_entry.l1, p2m->default_access);
             rc = p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
             if ( rc )
             {
@@ -237,6 +281,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         unmap_domain_page(l1_entry);
 
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
+        p2m_set_access(&new_entry.l1, p2m->default_access);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,
                                   level + 1);
         if ( rc )
@@ -425,8 +470,9 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
         if ( nt != ot )
         {
             unsigned long mfn = l1e_get_pfn(e);
+            p2m_access_t a = p2m_get_access(e.l1);
             unsigned long flags = p2m_type_to_flags(p2m, nt,
-                                                    _mfn(mfn), level);
+                                                    _mfn(mfn), level, a);
 
             if ( level )
             {
@@ -474,6 +520,32 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa)
     return rc;
 }
 
+static int p2m_pt_check_access(p2m_access_t p2ma)
+{
+    switch ( p2ma )
+    {
+    case p2m_access_n:
+    case p2m_access_w:
+    case p2m_access_wx:
+    case p2m_access_n2rwx:
+        return -EINVAL;
+    default:
+        break;
+    }
+    return 0;
+}
+
+static int p2m_pt_set_default_access(struct p2m_domain *p2m,
+                                     p2m_access_t p2ma)
+{
+    int rc = p2m_pt_check_access(p2ma);
+
+    if ( !rc )
+        p2m->default_access = p2ma;
+
+    return rc;
+}
+
 /* Checks only applicable to entries with order > PAGE_ORDER_4K */
 static void check_entry(mfn_t mfn, p2m_type_t new, p2m_type_t old,
                         unsigned int order)
@@ -514,6 +586,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
     if ( !sve )
         return -EOPNOTSUPP;
 
+    if ( (rc = p2m_pt_check_access(p2ma)) )
+        return rc;
+
     if ( tb_init_done )
     {
         struct {
@@ -564,10 +639,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
         l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
             ? p2m_l3e_from_pfn(mfn_x(mfn),
-                               p2m_type_to_flags(p2m, p2mt, mfn, 2))
+                               p2m_type_to_flags(p2m, p2mt, mfn, 2, p2ma))
             : l3e_empty();
         entry_content.l1 = l3e_content.l3;
 
+        p2m_set_access(&entry_content.l1, p2ma);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -597,10 +673,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
 
         if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
             entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
-                                         p2m_type_to_flags(p2m, p2mt, mfn, 0));
+                                         p2m_type_to_flags(p2m, p2mt, mfn, 0, p2ma));
         else
             entry_content = l1e_empty();
 
+        p2m_set_access(&entry_content.l1, p2ma);
         /* level 1 entry */
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -625,10 +702,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
         l2e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
             ? p2m_l2e_from_pfn(mfn_x(mfn),
-                               p2m_type_to_flags(p2m, p2mt, mfn, 1))
+                               p2m_type_to_flags(p2m, p2mt, mfn, 1, p2ma))
             : l2e_empty();
         entry_content.l1 = l2e_content.l2;
 
+        p2m_set_access(&entry_content.l1, p2ma);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -676,8 +754,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_,
      * XXX Once we start explicitly registering MMIO regions in the p2m 
      * XXX we will return p2m_invalid for unmapped gfns */
     *t = p2m_mmio_dm;
-    /* Not implemented except with EPT */
-    *a = p2m_access_rwx; 
+    *a = p2m_access_n;
 
     if ( gfn > p2m->max_mapped_pfn )
     {
@@ -740,6 +817,7 @@ pod_retry_l3:
                        l1_table_offset(addr));
             *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                                  p2m_flags_to_type(flags), p2m, gfn);
+            *a = p2m_get_access(l3e->l3);
             unmap_domain_page(l3e);
 
             ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
@@ -779,6 +857,7 @@ pod_retry_l2:
         mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr));
         *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                              p2m_flags_to_type(flags), p2m, gfn);
+        *a = p2m_get_access(l2e->l2);
         unmap_domain_page(l2e);
         
         ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
@@ -815,6 +894,7 @@ pod_retry_l1:
     }
     mfn = l1e_get_mfn(*l1e);
     *t = p2m_recalc_type(recalc || _needs_recalc(flags), l1t, p2m, gfn);
+    *a = p2m_get_access(l1e->l1);
     unmap_domain_page(l1e);
 
     ASSERT(mfn_valid(mfn) || !p2m_is_any_ram(*t) || p2m_is_paging(*t));
@@ -1063,6 +1143,7 @@ void p2m_pt_init(struct p2m_domain *p2m)
     p2m->change_entry_type_global = p2m_pt_change_entry_type_global;
     p2m->change_entry_type_range = p2m_pt_change_entry_type_range;
     p2m->write_p2m_entry = paging_write_p2m_entry;
+    p2m->set_default_access = p2m_pt_set_default_access;
 #if P2M_AUDIT
     p2m->audit_p2m = p2m_pt_audit_p2m;
 #else
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 94285db1b4..32e71067b7 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -269,6 +269,8 @@ struct p2m_domain {
                                           unsigned long gfn, l1_pgentry_t *p,
                                           l1_pgentry_t new, unsigned int level);
     long               (*audit_p2m)(struct p2m_domain *p2m);
+    int                (*set_default_access)(struct p2m_domain *p2m,
+                                             p2m_access_t p2ma);
 
     /*
      * P2M updates may require TLBs to be flushed (invalidated).
-- 
2.17.1



* Re: [Xen-devel] [PATCH v2] x86/mm: Add mem access rights to NPT
From: Jan Beulich @ 2019-08-29 15:04 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA
  Cc: Petre Ovidiu PIRCALABU, kevin.tian, tamas, wl, rcojocaru,
	george.dunlap, andrew.cooper3, George Dunlap, jun.nakajima,
	xen-devel, roger.pau

On 22.08.2019 16:02, Alexandru Stefan ISAILA wrote:
> This patch adds access control for NPT mode.
> 
> The access rights are stored in bits 56:53 of the NPT p2m table entries.

Why starting from bit 53? I can't seem to find any use of bit 52.

> The bits are free after clearing the IOMMU flags [1].
> 
> Modify p2m_type_to_flags() to accept and interpret an access value,
> parallel to the ept code.
> 
> Add a set_default_access() method to the p2m-pt and p2m-ept versions
> of the p2m rather than setting it directly, to deal with different
> default permitted access values.

I think this would better be a separate change.

> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -63,10 +63,13 @@
>  #define needs_recalc(level, ent) _needs_recalc(level##e_get_flags(ent))
>  #define valid_recalc(level, ent) (!(level##e_get_flags(ent) & _PAGE_ACCESSED))
>  
> +#define _PAGE_ACCESS_BITS  (0x1e000) /* mem_access rights 16:13 */

I guess this is too disconnected from the two page.h headers where
the correlation between bit positions gets explained, so I guess
you want to extend the comment and either refer there, or replicate
some of it to make it understandable why 16:13 matches 56:53.

I'm also concerned how easy it'll be for someone to find this
definition when wanting to use other of the available bits.

> @@ -104,8 +112,32 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
>              flags |= _PAGE_PWT;
>              ASSERT(!level);
>          }
> -        return flags | P2M_BASE_FLAGS | _PAGE_PCD;
> +        flags |= P2M_BASE_FLAGS | _PAGE_PCD;
> +        break;
>      }
> +
> +    switch ( access )
> +    {
> +    case p2m_access_r:
> +        flags |= _PAGE_NX_BIT;
> +        flags &= ~_PAGE_RW;
> +        break;
> +    case p2m_access_rw:
> +        flags |= _PAGE_NX_BIT;
> +        break;
> +    case p2m_access_rx:
> +    case p2m_access_rx2rw:
> +        flags &= ~(_PAGE_NX_BIT | _PAGE_RW);
> +        break;
> +    case p2m_access_x:
> +        flags &= ~_PAGE_RW;
> +        break;

I can't seem to follow you here. In fact I don't see
how you would be able to express execute-only with NPT. If this
is really needed for some reason, then a justifying comment
should be added.

> @@ -152,6 +184,17 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
>      p2m_free_ptp(p2m, l1e_get_page(*p2m_entry));
>  }
>  
> +static void p2m_set_access(intpte_t *entry, p2m_access_t access)
> +{
> +    *entry &= ~put_pte_flags(_PAGE_ACCESS_BITS);
> +    *entry |= put_pte_flags(MASK_INSR(access, _PAGE_ACCESS_BITS));
> +}
> +
> +static p2m_access_t p2m_get_access(intpte_t entry)
> +{
> +    return (p2m_access_t)(MASK_EXTR(get_pte_flags(entry), _PAGE_ACCESS_BITS));

Is the cast really needed here?

> @@ -226,6 +269,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
>          {
>              new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
>                                       flags);
> +            p2m_set_access(&new_entry.l1, p2m->default_access);
>              rc = p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
>              if ( rc )
>              {
> @@ -237,6 +281,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
>          unmap_domain_page(l1_entry);
>  
>          new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
> +        p2m_set_access(&new_entry.l1, p2m->default_access);
>          rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,
>                                    level + 1);
>          if ( rc )

Is it really intended to insert the access bits also into non-leaf
entries (which may be what is being processed here)? (May also
apply further down.)

> @@ -474,6 +520,32 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa)
>      return rc;
>  }
>  
> +static int p2m_pt_check_access(p2m_access_t p2ma)
> +{
> +    switch ( p2ma )
> +    {
> +    case p2m_access_n:
> +    case p2m_access_w:
> +    case p2m_access_wx:
> +    case p2m_access_n2rwx:
> +        return -EINVAL;

I'm not convinced EINVAL is appropriate here - the argument isn't
invalid, it's just that there's no way to represent it.

Jan


* Re: [Xen-devel] [PATCH v2] x86/mm: Add mem access rights to NPT
From: Alexandru Stefan ISAILA @ 2019-09-02 11:23 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Petre Ovidiu PIRCALABU, kevin.tian, tamas, wl, rcojocaru,
	george.dunlap, andrew.cooper3, George Dunlap, jun.nakajima,
	xen-devel, roger.pau



On 29.08.2019 18:04, Jan Beulich wrote:
> On 22.08.2019 16:02, Alexandru Stefan ISAILA wrote:
>> This patch adds access control for NPT mode.
>>
>> The access rights are stored in bits 56:53 of the NPT p2m table entries.
> 
> Why starting from bit 53? I can't seem to find any use of bit 52.

There is a comment in page.h that warns that bit 12 (pte bit 52) is taken:

/*
 * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
 * This is needed to distinguish between user and kernel PTEs since
 * _PAGE_USER is asserted for both.
 */
#define _PAGE_GUEST_KERNEL (1U<<12)

> 
>> The bits are free after clearing the IOMMU flags [1].
>>
>> Modify p2m_type_to_flags() to accept and interpret an access value,
>> parallel to the ept code.
>>
>> Add a set_default_access() method to the p2m-pt and p2m-ept versions
>> of the p2m rather than setting it directly, to deal with different
>> default permitted access values.
> 
> I think this would better be a separate change.

OK, I will break the patch into two parts. I was following George's v1 patch.

> 
>> --- a/xen/arch/x86/mm/p2m-pt.c
>> +++ b/xen/arch/x86/mm/p2m-pt.c
>> @@ -63,10 +63,13 @@
>>   #define needs_recalc(level, ent) _needs_recalc(level##e_get_flags(ent))
>>   #define valid_recalc(level, ent) (!(level##e_get_flags(ent) & _PAGE_ACCESSED))
>>   
>> +#define _PAGE_ACCESS_BITS  (0x1e000) /* mem_access rights 16:13 */
> 
> I guess this is too disconnected from the two page.h headers where
> the correlation between bit positions gets explained, so I guess
> you want to extend the comment and either refer there, or replicate
> some of it to make understandable why 16:13 matches 56:53.
> 

I will extend the comment so that the bit shifting is clear.

> I'm also concerned how easy it'll be for someone to find this
> definition when wanting to use other of the available bits.

Yes, you are right. I will add a comment in page.h noting that bits
56:53 are used for memory access rights on SVM.

> 
>> @@ -104,8 +112,32 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
>>               flags |= _PAGE_PWT;
>>               ASSERT(!level);
>>           }
>> -        return flags | P2M_BASE_FLAGS | _PAGE_PCD;
>> +        flags |= P2M_BASE_FLAGS | _PAGE_PCD;
>> +        break;
>>       }
>> +
>> +    switch ( access )
>> +    {
>> +    case p2m_access_r:
>> +        flags |= _PAGE_NX_BIT;
>> +        flags &= ~_PAGE_RW;
>> +        break;
>> +    case p2m_access_rw:
>> +        flags |= _PAGE_NX_BIT;
>> +        break;
>> +    case p2m_access_rx:
>> +    case p2m_access_rx2rw:
>> +        flags &= ~(_PAGE_NX_BIT | _PAGE_RW);
>> +        break;
>> +    case p2m_access_x:
>> +        flags &= ~_PAGE_RW;
>> +        break;
> 
> I can't seem to follow you here. In fact I don't see
> how you would be able to express execute-only with NPT. If this
> is really needed for some reason, then a justifying comment
> should be added.

Execute-only should be expressed as _PAGE_RW cleared and _PAGE_NX_BIT not set.
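
To spell out what the proposed switch does to, e.g., a p2m_ram_rw entry
(my reading of the v2 code; NPT has no separate read-permission bit, so
any present entry remains readable):

/*
 * p2m_access_r   : RW cleared, NX set  -> r--
 * p2m_access_rw  : NX set              -> rw-
 * p2m_access_rx  : RW and NX cleared   -> r-x
 * p2m_access_x   : RW cleared, NX left
 *                  clear for this type -> r-x (hardware still allows reads)
 * p2m_access_rwx : flags left as-is    -> rwx
 */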

> 
>> @@ -152,6 +184,17 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
>>       p2m_free_ptp(p2m, l1e_get_page(*p2m_entry));
>>   }
>>   
>> +static void p2m_set_access(intpte_t *entry, p2m_access_t access)
>> +{
>> +    *entry &= ~put_pte_flags(_PAGE_ACCESS_BITS);
>> +    *entry |= put_pte_flags(MASK_INSR(access, _PAGE_ACCESS_BITS));
>> +}
>> +
>> +static p2m_access_t p2m_get_access(intpte_t entry)
>> +{
>> +    return (p2m_access_t)(MASK_EXTR(get_pte_flags(entry), _PAGE_ACCESS_BITS));
> 
> Is the cast really needed here?

Not really, I can remove it in the next version.
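
I.e. simply (a sketch, relying on the implicit conversion to the enum type):

static p2m_access_t p2m_get_access(intpte_t entry)
{
    return MASK_EXTR(get_pte_flags(entry), _PAGE_ACCESS_BITS);
}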

> 
>> @@ -226,6 +269,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
>>           {
>>               new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
>>                                        flags);
>> +            p2m_set_access(&new_entry.l1, p2m->default_access);
>>               rc = p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
>>               if ( rc )
>>               {
>> @@ -237,6 +281,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
>>           unmap_domain_page(l1_entry);
>>   
>>           new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
>> +        p2m_set_access(&new_entry.l1, p2m->default_access);
>>           rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,
>>                                     level + 1);
>>           if ( rc )
> 
> Is it really intended to insert the access bits also into non-leaf
> entries (which may be what is being processed here)? (May also
> apply further down.)
> 
>> @@ -474,6 +520,32 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa)
>>       return rc;
>>   }
>>   
>> +static int p2m_pt_check_access(p2m_access_t p2ma)
>> +{
>> +    switch ( p2ma )
>> +    {
>> +    case p2m_access_n:
>> +    case p2m_access_w:
>> +    case p2m_access_wx:
>> +    case p2m_access_n2rwx:
>> +        return -EINVAL;
> 
> I'm not convinced EINVAL is appropriate here - the argument isn't
> invalid, it's just that there's no way to represent it.

Would EPERM be a better return here?

Regards,
Alex

* Re: [Xen-devel] [PATCH v2] x86/mm: Add mem access rights to NPT
From: Jan Beulich @ 2019-09-02 11:46 UTC (permalink / raw)
  To: Alexandru Stefan ISAILA
  To: Alexandru Stefan ISAILA
  Cc: Petre Ovidiu PIRCALABU, kevin.tian, tamas, wl, rcojocaru,
	george.dunlap, andrew.cooper3, George Dunlap, jun.nakajima,
	xen-devel, roger.pau

On 02.09.2019 13:23, Alexandru Stefan ISAILA wrote:
> On 29.08.2019 18:04, Jan Beulich wrote:
>> On 22.08.2019 16:02, Alexandru Stefan ISAILA wrote:
>>> This patch adds access control for NPT mode.
>>>
>>> The access rights are stored in bits 56:53 of the NPT p2m table entries.
>>
>> Why starting from bit 53? I can't seem to find any use of bit 52.
> 
> There is a comment in page.h that warns that bit 12(52) is taken.
> "/*
>   * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
>   * This is needed to distinguish between user and kernel PTEs since 
> _PAGE_USER
>   * is asserted for both.
>   */
> #define _PAGE_GUEST_KERNEL (1U<<12)
> "

But that's a PV-only thing. With sufficient care it should be
possible to have overlapping uses. And given that the available
bits are a pretty limited resource, I'd very much appreciate it if
you at least tried to make this work.
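
Something along these lines, purely as illustration (whether the PV-only
bit really never appears in HVM p2m entries would of course need
auditing first):

/*
 * _PAGE_GUEST_KERNEL (flag bit 12 / pte bit 52) is only used for PV
 * guests, so HVM p2m entries could overlap it and start the access
 * bits one position lower:
 */
#define _PAGE_ACCESS_BITS  (0xf000) /* flag bits 15:12 <-> pte bits 55:52 */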

>>> @@ -104,8 +112,32 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
>>>               flags |= _PAGE_PWT;
>>>               ASSERT(!level);
>>>           }
>>> -        return flags | P2M_BASE_FLAGS | _PAGE_PCD;
>>> +        flags |= P2M_BASE_FLAGS | _PAGE_PCD;
>>> +        break;
>>>       }
>>> +
>>> +    switch ( access )
>>> +    {
>>> +    case p2m_access_r:
>>> +        flags |= _PAGE_NX_BIT;
>>> +        flags &= ~_PAGE_RW;
>>> +        break;
>>> +    case p2m_access_rw:
>>> +        flags |= _PAGE_NX_BIT;
>>> +        break;
>>> +    case p2m_access_rx:
>>> +    case p2m_access_rx2rw:
>>> +        flags &= ~(_PAGE_NX_BIT | _PAGE_RW);
>>> +        break;
>>> +    case p2m_access_x:
>>> +        flags &= ~_PAGE_RW;
>>> +        break;
>>
>> I can't seem to follow you here. In fact I don't see
>> how you would be able to express execute-only with NPT. If this
>> is really needed for some reason, then a justifying comment
>> should be added.
> 
> Execute-only should be expressed as _PAGE_RW cleared and _PAGE_NX_BIT not set.

But that still doesn't yield exec-only. Where is this "should be
expressed" stated? I.e. on what basis is it tolerable to also allow
read access despite a request to the contrary?

>>> @@ -474,6 +520,32 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa)
>>>       return rc;
>>>   }
>>>   
>>> +static int p2m_pt_check_access(p2m_access_t p2ma)
>>> +{
>>> +    switch ( p2ma )
>>> +    {
>>> +    case p2m_access_n:
>>> +    case p2m_access_w:
>>> +    case p2m_access_wx:
>>> +    case p2m_access_n2rwx:
>>> +        return -EINVAL;
>>
>> I'm not convinced EINVAL is appropriate here - the argument isn't
>> invalid, it's just that there's no way to represent it.
> 
> Would EPERM be a better return here?

Quite a bit better, yes, though still not optimal; I confess that
I also can't find an optimal one. EDOM would look suitable too,
if one were to ignore the "math" aspect of it.

Jan

