From: Tamas K Lengyel <tamas.lengyel@zentific.com>
To: xen-devel@lists.xen.org
Cc: kevin.tian@intel.com, wei.liu2@citrix.com,
	ian.campbell@citrix.com, steve@zentific.com,
	stefano.stabellini@eu.citrix.com, jun.nakajima@intel.com,
	tim@xen.org, ian.jackson@eu.citrix.com, eddie.dong@intel.com,
	andres@lagarcavilla.org, jbeulich@suse.com,
	Tamas K Lengyel <tamas.lengyel@zentific.com>,
	rshriram@cs.ubc.ca, keir@xen.org, dgdegra@tycho.nsa.gov,
	yanghy@cn.fujitsu.com
Subject: [PATCH V6 12/13] xen/xsm: Split vm_event_op into three separate labels
Date: Wed, 18 Feb 2015 01:11:42 +0100
Message-ID: <1424218303-11331-13-git-send-email-tamas.lengyel@zentific.com>
In-Reply-To: <1424218303-11331-1-git-send-email-tamas.lengyel@zentific.com>

The XSM label vm_event_op has been used to control the three memops
governing mem_access, mem_paging and mem_sharing. While these subsystems
rely on vm_event, the memops themselves are not vm_event operations.
Thus, this patch introduces a separate XSM label for each of these memops.
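
For context, a policy that previously authorized these memops through the
vm_event permission now needs to grant the new permissions explicitly. A
minimal sketch in FLASK policy syntax, assuming the dom0_t/domU_t types
from Xen's example policy (the exact rule placement depends on the policy
modules in use):

    # Let the control domain use the memops formerly covered by vm_event
    allow dom0_t domU_t:domain2 { mem_access mem_paging mem_sharing };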

Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/mm/mem_paging.c        |  2 +-
 xen/arch/x86/mm/mem_sharing.c       |  2 +-
 xen/common/mem_access.c             |  2 +-
 xen/include/xsm/dummy.h             | 20 +++++++++++++++++++-
 xen/include/xsm/xsm.h               | 33 ++++++++++++++++++++++++++++++---
 xen/xsm/dummy.c                     | 13 ++++++++++++-
 xen/xsm/flask/hooks.c               | 33 ++++++++++++++++++++++++++++++---
 xen/xsm/flask/policy/access_vectors |  6 ++++++
 8 files changed, 100 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index cbdee99..0fa2abd 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -40,7 +40,7 @@ int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
     if ( rc )
         goto out;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_paging_op);
+    rc = xsm_mem_paging(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 2d49bc4..d56ecf9 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1316,7 +1316,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
     if ( !hap_enabled(d) || !d->arch.hvm_domain.mem_sharing_enabled )
          rc = -ENODEV;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_sharing_op);
+    rc = xsm_mem_sharing(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 511c8c5..aa00513 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -48,7 +48,7 @@ int mem_access_memop(unsigned long cmd,
     if ( !p2m_mem_access_sanity_check(d) )
         goto out;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
+    rc = xsm_mem_access(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index 50ee929..16967ed 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -519,11 +519,29 @@ static XSM_INLINE int xsm_vm_event_control(XSM_DEFAULT_ARG struct domain *d, int
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_vm_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static XSM_INLINE int xsm_mem_access(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
     return xsm_default_action(action, current->domain, d);
 }
+#endif
+
+#ifdef HAS_MEM_PAGING
+static XSM_INLINE int xsm_mem_paging(XSM_DEFAULT_ARG struct domain *d)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static XSM_INLINE int xsm_mem_sharing(XSM_DEFAULT_ARG struct domain *d)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+#endif
 
 #ifdef CONFIG_X86
 static XSM_INLINE int xsm_do_mca(XSM_DEFAULT_VOID)
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index d56a68f..2a88d84 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -142,7 +142,18 @@ struct xsm_operations {
     int (*get_vnumainfo) (struct domain *d);
 
     int (*vm_event_control) (struct domain *d, int mode, int op);
-    int (*vm_event_op) (struct domain *d, int op);
+
+#ifdef HAS_MEM_ACCESS
+    int (*mem_access) (struct domain *d);
+#endif
+
+#ifdef HAS_MEM_PAGING
+    int (*mem_paging) (struct domain *d);
+#endif
+
+#ifdef HAS_MEM_SHARING
+    int (*mem_sharing) (struct domain *d);
+#endif
 
 #ifdef CONFIG_X86
     int (*do_mca) (void);
@@ -546,10 +557,26 @@ static inline int xsm_vm_event_control (xsm_default_t def, struct domain *d, int
     return xsm_ops->vm_event_control(d, mode, op);
 }
 
-static inline int xsm_vm_event_op (xsm_default_t def, struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static inline int xsm_mem_access (xsm_default_t def, struct domain *d)
 {
-    return xsm_ops->vm_event_op(d, op);
+    return xsm_ops->mem_access(d);
 }
+#endif
+
+#ifdef HAS_MEM_PAGING
+static inline int xsm_mem_paging (xsm_default_t def, struct domain *d)
+{
+    return xsm_ops->mem_paging(d);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static inline int xsm_mem_sharing (xsm_default_t def, struct domain *d)
+{
+    return xsm_ops->mem_sharing(d);
+}
+#endif
 
 #ifdef CONFIG_X86
 static inline int xsm_do_mca(xsm_default_t def)
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 6d12d32..3ddb4f6 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -119,7 +119,18 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, map_gmfn_foreign);
 
     set_to_dummy_if_null(ops, vm_event_control);
-    set_to_dummy_if_null(ops, vm_event_op);
+
+#ifdef HAS_MEM_ACCESS
+    set_to_dummy_if_null(ops, mem_access);
+#endif
+
+#ifdef HAS_MEM_PAGING
+    set_to_dummy_if_null(ops, mem_paging);
+#endif
+
+#ifdef HAS_MEM_SHARING
+    set_to_dummy_if_null(ops, mem_sharing);
+#endif
 
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index a65f68c..01d761b 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1142,10 +1142,26 @@ static int flask_vm_event_control(struct domain *d, int mode, int op)
     return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__VM_EVENT);
 }
 
-static int flask_vm_event_op(struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static int flask_mem_access(struct domain *d)
 {
-    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__VM_EVENT);
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_ACCESS);
+}
+#endif
+
+#ifdef HAS_MEM_PAGING
+static int flask_mem_paging(struct domain *d)
+{
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_PAGING);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static int flask_mem_sharing(struct domain *d)
+{
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_SHARING);
 }
+#endif
 
 #if defined(HAS_PASSTHROUGH) && defined(HAS_PCI)
 static int flask_get_device_group(uint32_t machine_bdf)
@@ -1582,7 +1598,18 @@ static struct xsm_operations flask_ops = {
     .get_vnumainfo = flask_get_vnumainfo,
 
     .vm_event_control = flask_vm_event_control,
-    .vm_event_op = flask_vm_event_op,
+
+#ifdef HAS_MEM_ACCESS
+    .mem_access = flask_mem_access,
+#endif
+
+#ifdef HAS_MEM_PAGING
+    .mem_paging = flask_mem_paging,
+#endif
+
+#ifdef HAS_MEM_SHARING
+    .mem_sharing = flask_mem_sharing,
+#endif
 
 #ifdef CONFIG_COMPAT
     .do_compat_op = compat_flask_op,
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 2e231e1..e5197df 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -225,6 +225,12 @@ class domain2
 # XEN_DOMCTL_monitor_op
 # XEN_DOMCTL_vm_event_op
     vm_event
+# XENMEM_access_op
+    mem_access
+# XENMEM_paging_op
+    mem_paging
+# XENMEM_sharing_op
+    mem_sharing
 }
 
 # Similar to class domain, but primarily contains domctls related to HVM domains
-- 
2.1.4
