From: Alexandru Stefan ISAILA <aisaila@bitdefender.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: Petre Ovidiu PIRCALABU <ppircalabu@bitdefender.com>,
	"kevin.tian@intel.com" <kevin.tian@intel.com>,
	"tamas@tklengyel.com" <tamas@tklengyel.com>,
	"wl@xen.org" <wl@xen.org>,
	"rcojocaru@bitdefender.com" <rcojocaru@bitdefender.com>,
	"george.dunlap@eu.citrix.com" <george.dunlap@eu.citrix.com>,
	"andrew.cooper3@citrix.com" <andrew.cooper3@citrix.com>,
	George Dunlap <george.dunlap@citrix.com>,
	Alexandru Stefan ISAILA <aisaila@bitdefender.com>,
	"jbeulich@suse.com" <jbeulich@suse.com>,
	"jun.nakajima@intel.com" <jun.nakajima@intel.com>,
	"roger.pau@citrix.com" <roger.pau@citrix.com>
Subject: [Xen-devel] [PATCH v2] x86/mm: Add mem access rights to NPT
Date: Thu, 22 Aug 2019 14:02:24 +0000	[thread overview]
Message-ID: <20190822140210.24749-1-aisaila@bitdefender.com> (raw)

Add access control (mem_access) support for NPT mode.

The access rights are stored in bits 56:53 of the NPT p2m table
entries. These bits are free now that the IOMMU flags have been
cleared from the p2m-pt code [1].
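
For reference, this is roughly how the PTE <-> flags folding behind
those numbers works (a simplified rendition of get_pte_flags() /
put_pte_flags() from xen/include/asm-x86/x86_64/page.h; the real
macros are authoritative):

    /* PTE bits 63:52 are folded down by 40 bits, so PTE bit 52 shows
     * up as flags bit 12; PTE bits 56:53 therefore correspond to
     * flags bits 16:13, which is where the access value is kept. */
    #define get_pte_flags(x) (((int)((x) >> 40) & ~0xFFF) | ((int)(x) & 0xFFF))
    #define put_pte_flags(x) (((intpte_t)((x) & ~0xFFF) << 40) | ((x) & 0xFFF))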

Modify p2m_type_to_flags() to accept and interpret an access value,
paralleling the EPT code.
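
To make that mapping concrete, here is a minimal sketch (the helper
name apply_access() is invented for illustration; the real logic is
the new switch at the end of p2m_type_to_flags() in the diff below):

    /* How an access value restricts the type-derived NPT flags.  NPT
     * only has _PAGE_RW and the NX bit to play with, so read access
     * can never be removed on its own. */
    static unsigned long apply_access(unsigned long flags,
                                      p2m_access_t access)
    {
        switch ( access )
        {
        case p2m_access_r:             /* read-only, no execute */
            return (flags & ~_PAGE_RW) | _PAGE_NX_BIT;
        case p2m_access_rw:            /* read/write, no execute */
            return flags | _PAGE_NX_BIT;
        case p2m_access_rx:
        case p2m_access_rx2rw:         /* read/execute, no write */
            return flags & ~(_PAGE_NX_BIT | _PAGE_RW);
        case p2m_access_x:             /* exec implies read on NPT */
            return flags & ~_PAGE_RW;
        case p2m_access_rwx:
        default:
            return flags;
        }
    }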

Add a set_default_access() hook to the p2m-pt and p2m-ept
implementations of the p2m, rather than setting default_access
directly, so that each implementation can reject access values it
cannot represent.
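
A minimal sketch of the hook's contract (the helper name is invented;
it mirrors p2m_pt_check_access() in the diff below, while the EPT
implementation accepts every value):

    /* NPT cannot disable reads independently of writes/execution, so
     * access values requiring "no read" semantics are rejected. */
    static bool npt_can_represent(p2m_access_t a)
    {
        switch ( a )
        {
        case p2m_access_n:      /* no access at all */
        case p2m_access_w:      /* write without read */
        case p2m_access_wx:     /* write/execute without read */
        case p2m_access_n2rwx:  /* starts out as "no access" */
            return false;
        default:
            return true;
        }
    }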

[1] 854a49a7486a02edae5b3e53617bace526e9c1b1

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>

---
Note: Tested with xen-access write/exec
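
As a usage sketch, a monitor application drives this path through
libxc roughly as follows (xc_set_mem_access() as declared in
tools/libxc/include/xenctrl.h; xch, domid and gfn are assumed to be
set up already, and error handling is elided):

    /* ~0ull as the start pfn (with nr == 0) means "set the default
     * access", which now goes through the set_default_access() hook. */
    xc_set_mem_access(xch, domid, XENMEM_access_rx, ~0ull, 0);

    /* Restrict a single gfn to read-only; writes will then raise
     * mem_access events. */
    xc_set_mem_access(xch, domid, XENMEM_access_r, gfn, 1);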

Changes since V1:
	- Remove radix tree and store access bits in NPT p2m table.
---
 xen/arch/x86/mm/mem_access.c |   7 +--
 xen/arch/x86/mm/p2m-ept.c    |   9 +++
 xen/arch/x86/mm/p2m-pt.c     | 111 ++++++++++++++++++++++++++++++-----
 xen/include/asm-x86/p2m.h    |   2 +
 4 files changed, 109 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 0144f92b98..d8cc7a50de 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -374,10 +374,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
 
     /* If request to set default access. */
     if ( gfn_eq(gfn, INVALID_GFN) )
-    {
-        p2m->default_access = a;
-        return 0;
-    }
+        return p2m->set_default_access(p2m, a);
 
     p2m_lock(p2m);
     if ( ap2m )
@@ -520,7 +517,7 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
 
 bool p2m_mem_access_sanity_check(const struct domain *d)
 {
-    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
+    return is_hvm_domain(d) && hap_enabled(d);
 }
 
 /*
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 6b8468c793..a6f2347bc1 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -624,6 +624,14 @@ bool_t ept_handle_misconfig(uint64_t gpa)
     return spurious ? (rc >= 0) : (rc > 0);
 }
 
+int ept_set_default_access(struct p2m_domain *p2m, p2m_access_t p2ma)
+{
+    /* EPT can represent every p2m_access_t value, so accept them all. */
+    p2m->default_access = p2ma;
+
+    return 0;
+}
+
 /*
  * ept_set_entry() computes 'need_modify_vtd_table' for itself,
  * by observing whether any gfn->mfn translations are modified.
@@ -1222,6 +1230,7 @@ int ept_p2m_init(struct p2m_domain *p2m)
     p2m->change_entry_type_global = ept_change_entry_type_global;
     p2m->change_entry_type_range = ept_change_entry_type_range;
     p2m->memory_type_changed = ept_memory_type_changed;
+    p2m->set_default_access = ept_set_default_access;
     p2m->audit_p2m = NULL;
     p2m->tlb_flush = ept_tlb_flush;
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 3a0a500d66..7f79eac979 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -63,10 +63,13 @@
 #define needs_recalc(level, ent) _needs_recalc(level##e_get_flags(ent))
 #define valid_recalc(level, ent) (!(level##e_get_flags(ent) & _PAGE_ACCESSED))
 
+#define _PAGE_ACCESS_BITS  (0x1e000) /* mem_access rights: flag bits 16:13 */
+
 static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
                                        p2m_type_t t,
                                        mfn_t mfn,
-                                       unsigned int level)
+                                       unsigned int level,
+                                       p2m_access_t access)
 {
     unsigned long flags = (unsigned long)(t & 0x7f) << 12;
 
@@ -79,23 +82,28 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
     case p2m_ram_paged:
     case p2m_ram_paging_in:
     default:
-        return flags | _PAGE_NX_BIT;
+        flags |= _PAGE_NX_BIT;
+        break;
     case p2m_grant_map_ro:
-        return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
+        flags |= P2M_BASE_FLAGS | _PAGE_NX_BIT;
+        break;
     case p2m_ioreq_server:
         flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
         if ( p2m->ioreq.flags & XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
-            return flags & ~_PAGE_RW;
-        return flags;
+            flags &= ~_PAGE_RW;
+        break;
     case p2m_ram_ro:
     case p2m_ram_logdirty:
     case p2m_ram_shared:
-        return flags | P2M_BASE_FLAGS;
+        flags |= P2M_BASE_FLAGS;
+        break;
     case p2m_ram_rw:
-        return flags | P2M_BASE_FLAGS | _PAGE_RW;
+        flags |= P2M_BASE_FLAGS | _PAGE_RW;
+        break;
     case p2m_grant_map_rw:
     case p2m_map_foreign:
-        return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
+        flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
+        break;
     case p2m_mmio_direct:
         if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
             flags |= _PAGE_RW;
@@ -104,8 +112,32 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
             flags |= _PAGE_PWT;
             ASSERT(!level);
         }
-        return flags | P2M_BASE_FLAGS | _PAGE_PCD;
+        flags |= P2M_BASE_FLAGS | _PAGE_PCD;
+        break;
     }
+
+    switch ( access )
+    {
+    case p2m_access_r:
+        flags |= _PAGE_NX_BIT;
+        flags &= ~_PAGE_RW;
+        break;
+    case p2m_access_rw:
+        flags |= _PAGE_NX_BIT;
+        break;
+    case p2m_access_rx:
+    case p2m_access_rx2rw:
+        flags &= ~(_PAGE_NX_BIT | _PAGE_RW);
+        break;
+    case p2m_access_x:
+        flags &= ~_PAGE_RW;
+        break;
+    case p2m_access_rwx:
+    default:
+        break;
+    }
+
+    return flags;
 }
 
 
@@ -152,6 +184,17 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
     p2m_free_ptp(p2m, l1e_get_page(*p2m_entry));
 }
 
+static void p2m_set_access(intpte_t *entry, p2m_access_t access)
+{
+    *entry = (*entry & ~put_pte_flags(_PAGE_ACCESS_BITS)) |
+             put_pte_flags(MASK_INSR(access, _PAGE_ACCESS_BITS));
+}
+
+static p2m_access_t p2m_get_access(intpte_t entry)
+{
+    return (p2m_access_t)(MASK_EXTR(get_pte_flags(entry), _PAGE_ACCESS_BITS));
+}
+
 // Walk one level of the P2M table, allocating a new table if required.
 // Returns 0 on error.
 //
@@ -226,6 +269,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         {
             new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
                                      flags);
+            p2m_set_access(&new_entry.l1, p2m->default_access);
             rc = p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
             if ( rc )
             {
@@ -237,6 +281,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         unmap_domain_page(l1_entry);
 
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
+        p2m_set_access(&new_entry.l1, p2m->default_access);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,
                                   level + 1);
         if ( rc )
@@ -425,8 +470,9 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
         if ( nt != ot )
         {
             unsigned long mfn = l1e_get_pfn(e);
+            p2m_access_t a = p2m_get_access(e.l1);
             unsigned long flags = p2m_type_to_flags(p2m, nt,
-                                                    _mfn(mfn), level);
+                                                    _mfn(mfn), level, a);
 
             if ( level )
             {
@@ -474,6 +520,32 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa)
     return rc;
 }
 
+static int p2m_pt_check_access(p2m_access_t p2ma)
+{
+    switch ( p2ma )
+    {
+    case p2m_access_n:
+    case p2m_access_w:
+    case p2m_access_wx:
+    case p2m_access_n2rwx:
+        return -EINVAL;
+    default:
+        break;
+    }
+    return 0;
+}
+
+static int p2m_pt_set_default_access(struct p2m_domain *p2m,
+                                     p2m_access_t p2ma)
+{
+    int rc = p2m_pt_check_access(p2ma);
+
+    if ( !rc )
+        p2m->default_access = p2ma;
+
+    return rc;
+}
+
 /* Checks only applicable to entries with order > PAGE_ORDER_4K */
 static void check_entry(mfn_t mfn, p2m_type_t new, p2m_type_t old,
                         unsigned int order)
@@ -514,6 +586,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
     if ( !sve )
         return -EOPNOTSUPP;
 
+    if ( (rc = p2m_pt_check_access(p2ma)) )
+        return rc;
+
     if ( tb_init_done )
     {
         struct {
@@ -564,10 +639,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
         l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
             ? p2m_l3e_from_pfn(mfn_x(mfn),
-                               p2m_type_to_flags(p2m, p2mt, mfn, 2))
+                               p2m_type_to_flags(p2m, p2mt, mfn, 2, p2ma))
             : l3e_empty();
         entry_content.l1 = l3e_content.l3;
 
+        p2m_set_access(&entry_content.l1, p2ma);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -597,10 +673,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
 
         if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
             entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
-                                         p2m_type_to_flags(p2m, p2mt, mfn, 0));
+                                         p2m_type_to_flags(p2m, p2mt, mfn, 0, p2ma));
         else
             entry_content = l1e_empty();
 
+        p2m_set_access(&entry_content.l1, p2ma);
         /* level 1 entry */
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -625,10 +702,11 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
         l2e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
             ? p2m_l2e_from_pfn(mfn_x(mfn),
-                               p2m_type_to_flags(p2m, p2mt, mfn, 1))
+                               p2m_type_to_flags(p2m, p2mt, mfn, 1, p2ma))
             : l2e_empty();
         entry_content.l1 = l2e_content.l2;
 
+        p2m_set_access(&entry_content.l1, p2ma);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -676,8 +754,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_,
      * XXX Once we start explicitly registering MMIO regions in the p2m 
      * XXX we will return p2m_invalid for unmapped gfns */
     *t = p2m_mmio_dm;
-    /* Not implemented except with EPT */
-    *a = p2m_access_rwx; 
+    *a = p2m_access_n;
 
     if ( gfn > p2m->max_mapped_pfn )
     {
@@ -740,6 +817,7 @@ pod_retry_l3:
                        l1_table_offset(addr));
             *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                                  p2m_flags_to_type(flags), p2m, gfn);
+            *a = p2m_get_access(l3e->l3);
             unmap_domain_page(l3e);
 
             ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
@@ -779,6 +857,7 @@ pod_retry_l2:
         mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr));
         *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                              p2m_flags_to_type(flags), p2m, gfn);
+        *a = p2m_get_access(l2e->l2);
         unmap_domain_page(l2e);
         
         ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
@@ -815,6 +894,7 @@ pod_retry_l1:
     }
     mfn = l1e_get_mfn(*l1e);
     *t = p2m_recalc_type(recalc || _needs_recalc(flags), l1t, p2m, gfn);
+    *a = p2m_get_access(l1e->l1);
     unmap_domain_page(l1e);
 
     ASSERT(mfn_valid(mfn) || !p2m_is_any_ram(*t) || p2m_is_paging(*t));
@@ -1063,6 +1143,7 @@ void p2m_pt_init(struct p2m_domain *p2m)
     p2m->change_entry_type_global = p2m_pt_change_entry_type_global;
     p2m->change_entry_type_range = p2m_pt_change_entry_type_range;
     p2m->write_p2m_entry = paging_write_p2m_entry;
+    p2m->set_default_access = p2m_pt_set_default_access;
 #if P2M_AUDIT
     p2m->audit_p2m = p2m_pt_audit_p2m;
 #else
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 94285db1b4..32e71067b7 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -269,6 +269,8 @@ struct p2m_domain {
                                           unsigned long gfn, l1_pgentry_t *p,
                                           l1_pgentry_t new, unsigned int level);
     long               (*audit_p2m)(struct p2m_domain *p2m);
+    int                (*set_default_access)(struct p2m_domain *p2m,
+                                             p2m_access_t p2ma);
 
     /*
      * P2M updates may require TLBs to be flushed (invalidated).
-- 
2.17.1

