From: Tamas K Lengyel <tklengyel@sec.in.tum.de>
To: xen-devel@lists.xen.org
Cc: ian.campbell@citrix.com, tim@xen.org, julien.grall@linaro.org,
	ian.jackson@eu.citrix.com, stefano.stabellini@citrix.com,
	andres@lagarcavilla.org, jbeulich@suse.com,
	dgdegra@tycho.nsa.gov, Tamas K Lengyel <tklengyel@sec.in.tum.de>
Subject: [PATCH for-4.5 v8 12/19] xen/arm: p2m changes for mem_access support
Date: Tue, 23 Sep 2014 15:14:23 +0200
Message-ID: <1411478070-13836-13-git-send-email-tklengyel@sec.in.tum.de>
In-Reply-To: <1411478070-13836-1-git-send-email-tklengyel@sec.in.tum.de>

Add p2m_access_t to struct p2m_domain and add the necessary changes for
the page table construction routines to pass the default access
information. We store the p2m_access_t settings in a radix tree, as the
PTE lacks enough software-programmable bits.
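
As a rough sketch of how the radix tree and the access_in_use flag are
meant to interact (illustrative only, not part of this patch: the helper
names below are made up, though the radix-tree calls and the
radix_tree_int_to_ptr()/radix_tree_ptr_to_int() helpers are assumed to
be Xen's standard API):

    /* Sketch: store/fetch a per-GFN p2m_access_t in the radix tree
     * added by this patch.  Only non-default settings are stored; an
     * absent entry means the default applies. */
    static int p2m_set_gfn_access(struct p2m_domain *p2m,
                                  unsigned long gfn, p2m_access_t a)
    {
        int rc;

        if ( a == p2m->default_access )
        {
            radix_tree_delete(&p2m->mem_access_settings, gfn);
            return 0;
        }

        rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
                               radix_tree_int_to_ptr(a));
        if ( rc == -EEXIST )
        {
            /* An entry already exists: replace it. */
            radix_tree_delete(&p2m->mem_access_settings, gfn);
            rc = radix_tree_insert(&p2m->mem_access_settings, gfn,
                                   radix_tree_int_to_ptr(a));
        }
        if ( rc == 0 )
            p2m->access_in_use = true;
        return rc;
    }

    static p2m_access_t p2m_get_gfn_access(struct p2m_domain *p2m,
                                           unsigned long gfn)
    {
        void *v;

        /* Fast path: if the tree was never populated, skip the lookup. */
        if ( !p2m->access_in_use )
            return p2m->default_access;

        v = radix_tree_lookup(&p2m->mem_access_settings, gfn);
        return v ? radix_tree_ptr_to_int(v) : p2m->default_access;
    }

Storing only non-default settings keeps the tree empty in the common
case, which is exactly what the access_in_use fast path exploits.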

Signed-off-by: Tamas K Lengyel <tklengyel@sec.in.tum.de>
---
v8: - Drop lock inputs as common mem_access_check is postponed.
    - Resurrect the radix tree with an extra boolean access_in_use flag
      that indicates whether the tree is empty, so lookups can be skipped.
v7: - Remove radix tree init/destroy and move p2m_access_t store to page_info.
    - Add p2m_gpfn_lock/unlock functions.
    - Add bool_t lock input to p2m_lookup and apply_p2m_changes so the caller
      can specify if locking should be performed. This is needed in order to
      support mem_access_check from common.
v6: - Move mem_event header include to first patch that needs it.
v5: - #include grouping style-fix.
v4: - Move p2m_get_hostp2m definition here.
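
Aside: the p2m_type_t comment moved in the p2m.h hunk below notes that
only 4 software-programmable PTE bits are available for the type, so at
most 16 storable values. A compile-time guard along these lines could
catch running out of values (illustrative only, not part of this patch):

    /* Place in some init path, e.g. p2m_init(); BUILD_BUG_ON() is
     * checked at compile time.  Only types below p2m_max_real_type are
     * ever stored in the PTE, so those must fit in 4 bits. */
    BUILD_BUG_ON(p2m_max_real_type > (1 << 4));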
---
 xen/arch/arm/p2m.c           | 49 +++++++++++++++++++---------
 xen/include/asm-arm/domain.h |  1 +
 xen/include/asm-arm/p2m.h    | 77 ++++++++++++++++++++++++++++++--------------
 3 files changed, 86 insertions(+), 41 deletions(-)

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 4dccf7b..760d064 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -228,7 +228,7 @@ int p2m_pod_decrease_reservation(struct domain *d,
 }
 
 static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
-                               p2m_type_t t)
+                               p2m_type_t t, p2m_access_t a)
 {
     paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
     /* sh, xn and write bit will be defined in the following switches
@@ -346,7 +346,7 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
          for ( i=0 ; i < LPAE_ENTRIES; i++ )
          {
              pte = mfn_to_p2m_entry(base_pfn + (i<<(level_shift-LPAE_SHIFT)),
-                                    MATTR_MEM, t);
+                                    MATTR_MEM, t, p2m->default_access);
 
              /*
               * First and second level super pages set p2m.table = 0, but
@@ -366,7 +366,8 @@ static int p2m_create_table(struct domain *d, lpae_t *entry,
 
     unmap_domain_page(p);
 
-    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid);
+    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid,
+                           p2m->default_access);
 
     p2m_write_pte(entry, pte, flush_cache);
 
@@ -469,7 +470,8 @@ static int apply_one_level(struct domain *d,
                            paddr_t *maddr,
                            bool_t *flush,
                            int mattr,
-                           p2m_type_t t)
+                           p2m_type_t t,
+                           p2m_access_t a)
 {
     const paddr_t level_size = level_sizes[level];
     const paddr_t level_mask = level_masks[level];
@@ -498,7 +500,7 @@ static int apply_one_level(struct domain *d,
             page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
             if ( page )
             {
-                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t);
+                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
                 if ( level < 3 )
                     pte.p2m.table = 0;
                 p2m_write_pte(entry, pte, flush_cache);
@@ -533,7 +535,7 @@ static int apply_one_level(struct domain *d,
              (level == 3 || !p2m_table(orig_pte)) )
         {
             /* New mapping is superpage aligned, make it */
-            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t);
+            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
             if ( level < 3 )
                 pte.p2m.table = 0; /* Superpage entry */
 
@@ -712,7 +714,9 @@ static int apply_p2m_changes(struct domain *d,
                      paddr_t end_gpaddr,
                      paddr_t maddr,
                      int mattr,
-                     p2m_type_t t)
+                     uint32_t mask,
+                     p2m_type_t t,
+                     p2m_access_t a)
 {
     int rc, ret;
     struct p2m_domain *p2m = &d->arch.p2m;
@@ -805,7 +809,7 @@ static int apply_p2m_changes(struct domain *d,
                                   level, flush_pt, op,
                                   start_gpaddr, end_gpaddr,
                                   &addr, &maddr, &flush,
-                                  mattr, t);
+                                  mattr, t, a);
             if ( ret < 0 ) { rc = ret ; goto out; }
             count += ret;
             /* L3 had better have done something! We cannot descend any further */
@@ -863,7 +867,7 @@ out:
          */
         apply_p2m_changes(d, REMOVE,
                           start_gpaddr, addr + level_sizes[level], orig_maddr,
-                          mattr, p2m_invalid);
+                          mattr, 0, p2m_invalid, d->arch.p2m.default_access);
     }
 
     for ( level = P2M_ROOT_LEVEL; level < 4; level ++ )
@@ -882,7 +886,8 @@ int p2m_populate_ram(struct domain *d,
                      paddr_t end)
 {
     return apply_p2m_changes(d, ALLOCATE, start, end,
-                             0, MATTR_MEM, p2m_ram_rw);
+                             0, MATTR_MEM, 0, p2m_ram_rw,
+                             d->arch.p2m.default_access);
 }
 
 int map_mmio_regions(struct domain *d,
@@ -894,7 +899,8 @@ int map_mmio_regions(struct domain *d,
                              pfn_to_paddr(start_gfn),
                              pfn_to_paddr(start_gfn + nr),
                              pfn_to_paddr(mfn),
-                             MATTR_DEV, p2m_mmio_direct);
+                             MATTR_DEV, 0, p2m_mmio_direct,
+                             d->arch.p2m.default_access);
 }
 
 int unmap_mmio_regions(struct domain *d,
@@ -906,7 +912,8 @@ int unmap_mmio_regions(struct domain *d,
                              pfn_to_paddr(start_gfn),
                              pfn_to_paddr(start_gfn + nr),
                              pfn_to_paddr(mfn),
-                             MATTR_DEV, p2m_invalid);
+                             MATTR_DEV, 0, p2m_invalid,
+                             d->arch.p2m.default_access);
 }
 
 int guest_physmap_add_entry(struct domain *d,
@@ -918,7 +925,8 @@ int guest_physmap_add_entry(struct domain *d,
     return apply_p2m_changes(d, INSERT,
                              pfn_to_paddr(gpfn),
                              pfn_to_paddr(gpfn + (1 << page_order)),
-                             pfn_to_paddr(mfn), MATTR_MEM, t);
+                             pfn_to_paddr(mfn), MATTR_MEM, 0, t,
+                             d->arch.p2m.default_access);
 }
 
 void guest_physmap_remove_page(struct domain *d,
@@ -928,7 +936,8 @@ void guest_physmap_remove_page(struct domain *d,
     apply_p2m_changes(d, REMOVE,
                       pfn_to_paddr(gpfn),
                       pfn_to_paddr(gpfn + (1<<page_order)),
-                      pfn_to_paddr(mfn), MATTR_MEM, p2m_invalid);
+                      pfn_to_paddr(mfn), MATTR_MEM, 0, p2m_invalid,
+                      d->arch.p2m.default_access);
 }
 
 int arch_grant_map_page_identity(struct domain *d, unsigned long frame,
@@ -1058,6 +1067,8 @@ void p2m_teardown(struct domain *d)
 
     p2m_free_vmid(d);
 
+    radix_tree_destroy(&p2m->mem_access_settings, NULL);
+
     spin_unlock(&p2m->lock);
 }
 
@@ -1083,6 +1094,10 @@ int p2m_init(struct domain *d)
     p2m->max_mapped_gfn = 0;
     p2m->lowest_mapped_gfn = ULONG_MAX;
 
+    p2m->default_access = p2m_access_rwx;
+    p2m->access_in_use = false;
+    radix_tree_init(&p2m->mem_access_settings);
+
 err:
     spin_unlock(&p2m->lock);
 
@@ -1097,7 +1112,8 @@ int relinquish_p2m_mapping(struct domain *d)
                               pfn_to_paddr(p2m->lowest_mapped_gfn),
                               pfn_to_paddr(p2m->max_mapped_gfn),
                               pfn_to_paddr(INVALID_MFN),
-                              MATTR_MEM, p2m_invalid);
+                              MATTR_MEM, 0, p2m_invalid,
+                              d->arch.p2m.default_access);
 }
 
 int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
@@ -1111,7 +1127,8 @@ int p2m_cache_flush(struct domain *d, xen_pfn_t start_mfn, xen_pfn_t end_mfn)
                              pfn_to_paddr(start_mfn),
                              pfn_to_paddr(end_mfn),
                              pfn_to_paddr(INVALID_MFN),
-                             MATTR_MEM, p2m_invalid);
+                             MATTR_MEM, 0, p2m_invalid,
+                             d->arch.p2m.default_access);
 }
 
 unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 787e93c..3d69152 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -17,6 +17,7 @@ struct hvm_domain
 {
     uint64_t              params[HVM_NR_PARAMS];
     struct hvm_iommu      iommu;
+    bool_t                introspection_enabled;
 }  __cacheline_aligned;
 
 #ifdef CONFIG_ARM_64
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 27225c4..dba0df5 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -2,6 +2,8 @@
 #define _XEN_P2M_H
 
 #include <xen/mm.h>
+#include <xen/radix-tree.h>
+#include <public/mem_event.h> /* for mem_event_response_t */
 
 #include <xen/p2m-common.h>
 
@@ -11,6 +13,31 @@ struct domain;
 
 extern void memory_type_changed(struct domain *);
 
+/* List of possible types for each page in the p2m entry.
+ * The number of available bits per page in the PTE for this purpose is 4,
+ * so only 16 types are possible. If we run out of values in the
+ * future, it is possible to use higher values for pseudo-types and not
+ * store them in the p2m entry.
+ */
+typedef enum {
+    p2m_invalid = 0,    /* Nothing mapped here */
+    p2m_ram_rw,         /* Normal read/write guest RAM */
+    p2m_ram_ro,         /* Read-only; writes are silently dropped */
+    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
+    p2m_map_foreign,    /* RAM pages from a foreign domain */
+    p2m_grant_map_rw,   /* Read/write grant mapping */
+    p2m_grant_map_ro,   /* Read-only grant mapping */
+    /* The types below are only used to decide the page attribute in the P2M */
+    p2m_iommu_map_rw,   /* Read/write iommu mapping */
+    p2m_iommu_map_ro,   /* Read-only iommu mapping */
+    p2m_max_real_type,  /* Types after this won't be stored in the p2m */
+} p2m_type_t;
+
+/* Look up a GFN and take a reference count on the backing page. */
+typedef unsigned int p2m_query_t;
+#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
+#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
+
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
@@ -48,27 +75,20 @@ struct p2m_domain {
     /* If true, and an access fault comes in and there is no mem_event listener,
      * pause domain. Otherwise, remove access restrictions. */
     bool_t access_required;
-};
 
-/* List of possible type for each page in the p2m entry.
- * The number of available bit per page in the pte for this purpose is 4 bits.
- * So it's possible to only have 16 fields. If we run out of value in the
- * future, it's possible to use higher value for pseudo-type and don't store
- * them in the p2m entry.
- */
-typedef enum {
-    p2m_invalid = 0,    /* Nothing mapped here */
-    p2m_ram_rw,         /* Normal read/write guest RAM */
-    p2m_ram_ro,         /* Read-only; writes are silently dropped */
-    p2m_mmio_direct,    /* Read/write mapping of genuine MMIO area */
-    p2m_map_foreign,    /* Ram pages from foreign domain */
-    p2m_grant_map_rw,   /* Read/write grant mapping */
-    p2m_grant_map_ro,   /* Read-only grant mapping */
-    /* The types below are only used to decide the page attribute in the P2M */
-    p2m_iommu_map_rw,   /* Read/write iommu mapping */
-    p2m_iommu_map_ro,   /* Read-only iommu mapping */
-    p2m_max_real_type,  /* Types after this won't be store in the p2m */
-} p2m_type_t;
+    /* Indicates whether mem_access is in use for the domain, to avoid
+     * unnecessary radix tree lookups. */
+    bool_t access_in_use;
+
+    /* Default P2M access type for each page in the domain: new pages,
+     * swapped-in pages, cleared pages, and pages that are ambiguously
+     * retyped get this access type. See definition of p2m_access_t. */
+    p2m_access_t default_access;
+
+    /* Radix tree to store the p2m_access_t settings, as the PTEs don't
+     * have enough available bits to store this information. */
+    struct radix_tree_root mem_access_settings;
+};
 
 static inline
 void p2m_mem_event_emulate_check(struct domain *d,
@@ -77,6 +97,7 @@ void p2m_mem_event_emulate_check(struct domain *d,
     /* Not supported on ARM. */
 };
 
+static inline
 void p2m_enable_msr_exit_interception(struct domain *d)
 {
     /* Not supported on ARM. */
@@ -157,11 +178,6 @@ p2m_pod_decrease_reservation(struct domain *d,
                              xen_pfn_t gpfn,
                              unsigned int order);
 
-/* Look up a GFN and take a reference count on the backing page. */
-typedef unsigned int p2m_query_t;
-#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
-#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
-
 static inline struct page_info *get_page_from_gfn(
     struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
 {
@@ -220,6 +236,17 @@ int arch_grant_unmap_page_identity(struct domain *d, unsigned long frame);
 /* get host p2m table */
 #define p2m_get_hostp2m(d) (&(d)->arch.p2m)
 
+/* mem_event and mem_access are supported on any ARM guest */
+static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
+{
+    return 1;
+}
+
+static inline bool_t p2m_mem_event_sanity_check(struct domain *d)
+{
+    return 1;
+}
+
 #endif /* _XEN_P2M_H */
 
 /*
-- 
2.1.0
