From: Tamas K Lengyel <tamas.lengyel@zentific.com>
To: xen-devel@lists.xen.org
Cc: kevin.tian@intel.com, wei.liu2@citrix.com,
	ian.campbell@citrix.com, steve@zentific.com,
	stefano.stabellini@eu.citrix.com, jun.nakajima@intel.com,
	tim@xen.org, ian.jackson@eu.citrix.com, eddie.dong@intel.com,
	andres@lagarcavilla.org, jbeulich@suse.com,
	Tamas K Lengyel <tamas.lengyel@zentific.com>,
	rshriram@cs.ubc.ca, keir@xen.org, dgdegra@tycho.nsa.gov,
	yanghy@cn.fujitsu.com, rcojocaru@bitdefender.com
Subject: [PATCH V9 5/6] xen/xsm: Split vm_event_op into three separate labels
Date: Thu,  9 Apr 2015 16:32:52 +0200
Message-ID: <1428589973-24438-6-git-send-email-tamas.lengyel@zentific.com>
In-Reply-To: <1428589973-24438-1-git-send-email-tamas.lengyel@zentific.com>

The XSM label vm_event_op has been used to control the three memops
governing mem_access, mem_paging and mem_sharing. While these subsystems
rely on vm_event, the memops are not vm_event operations themselves.
Thus, in this patch we introduce a separate label for each of these
memops.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Acked-by: Tim Deegan <tim@xen.org>
---
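Note: as an illustration of the benefit of this split, consider a
hypothetical FLASK policy fragment (not part of this patch). The type
name introspector_t below is assumed for the example (only domU_t exists
in the default policy); with the new vectors, such a domain can be
granted mem_access rights without also receiving the mem_paging and
mem_sharing permissions that the single vm_event vector previously
implied for these memops:

    # Hypothetical rule; "introspector_t" is an assumed type name.
    # Grants the vm_event domctls and XENMEM_access_op, but not
    # XENMEM_paging_op or XENMEM_sharing_op.
    allow introspector_t domU_t:domain2 { vm_event mem_access };

Conversely, a policy that previously relied on the vm_event vector to
cover these memops would need to add the corresponding new vectors.
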
 xen/arch/x86/mm/mem_paging.c        |  2 +-
 xen/arch/x86/mm/mem_sharing.c       |  2 +-
 xen/common/mem_access.c             |  2 +-
 xen/include/xsm/dummy.h             | 20 +++++++++++++++++++-
 xen/include/xsm/xsm.h               | 33 ++++++++++++++++++++++++++++++---
 xen/xsm/dummy.c                     | 13 ++++++++++++-
 xen/xsm/flask/hooks.c               | 33 ++++++++++++++++++++++++++++++---
 xen/xsm/flask/policy/access_vectors |  6 ++++++
 8 files changed, 100 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index 17d2319..9ee3aba 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -39,7 +39,7 @@ int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
     if ( rc )
         return rc;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_paging_op);
+    rc = xsm_mem_paging(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index ff01378..78fb013 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1311,7 +1311,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
     if ( rc )
         return rc;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_sharing_op);
+    rc = xsm_mem_sharing(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 511c8c5..aa00513 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -48,7 +48,7 @@ int mem_access_memop(unsigned long cmd,
     if ( !p2m_mem_access_sanity_check(d) )
         goto out;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
+    rc = xsm_mem_access(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index 50ee929..16967ed 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -519,11 +519,29 @@ static XSM_INLINE int xsm_vm_event_control(XSM_DEFAULT_ARG struct domain *d, int
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_vm_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static XSM_INLINE int xsm_mem_access(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
     return xsm_default_action(action, current->domain, d);
 }
+#endif
+
+#ifdef HAS_MEM_PAGING
+static XSM_INLINE int xsm_mem_paging(XSM_DEFAULT_ARG struct domain *d)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static XSM_INLINE int xsm_mem_sharing(XSM_DEFAULT_ARG struct domain *d)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+#endif
 
 #ifdef CONFIG_X86
 static XSM_INLINE int xsm_do_mca(XSM_DEFAULT_VOID)
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index ca8371c..49f06c9 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -142,7 +142,18 @@ struct xsm_operations {
     int (*get_vnumainfo) (struct domain *d);
 
     int (*vm_event_control) (struct domain *d, int mode, int op);
-    int (*vm_event_op) (struct domain *d, int op);
+
+#ifdef HAS_MEM_ACCESS
+    int (*mem_access) (struct domain *d);
+#endif
+
+#ifdef HAS_MEM_PAGING
+    int (*mem_paging) (struct domain *d);
+#endif
+
+#ifdef HAS_MEM_SHARING
+    int (*mem_sharing) (struct domain *d);
+#endif
 
 #ifdef CONFIG_X86
     int (*do_mca) (void);
@@ -546,10 +557,26 @@ static inline int xsm_vm_event_control (xsm_default_t def, struct domain *d, int
     return xsm_ops->vm_event_control(d, mode, op);
 }
 
-static inline int xsm_vm_event_op (xsm_default_t def, struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static inline int xsm_mem_access (xsm_default_t def, struct domain *d)
 {
-    return xsm_ops->vm_event_op(d, op);
+    return xsm_ops->mem_access(d);
 }
+#endif
+
+#ifdef HAS_MEM_PAGING
+static inline int xsm_mem_paging (xsm_default_t def, struct domain *d)
+{
+    return xsm_ops->mem_paging(d);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static inline int xsm_mem_sharing (xsm_default_t def, struct domain *d)
+{
+    return xsm_ops->mem_sharing(d);
+}
+#endif
 
 #ifdef CONFIG_X86
 static inline int xsm_do_mca(xsm_default_t def)
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 6d12d32..3ddb4f6 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -119,7 +119,18 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, map_gmfn_foreign);
 
     set_to_dummy_if_null(ops, vm_event_control);
-    set_to_dummy_if_null(ops, vm_event_op);
+
+#ifdef HAS_MEM_ACCESS
+    set_to_dummy_if_null(ops, mem_access);
+#endif
+
+#ifdef HAS_MEM_PAGING
+    set_to_dummy_if_null(ops, mem_paging);
+#endif
+
+#ifdef HAS_MEM_SHARING
+    set_to_dummy_if_null(ops, mem_sharing);
+#endif
 
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 061d897..6215001 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1139,10 +1139,26 @@ static int flask_vm_event_control(struct domain *d, int mode, int op)
     return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__VM_EVENT);
 }
 
-static int flask_vm_event_op(struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static int flask_mem_access(struct domain *d)
 {
-    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__VM_EVENT);
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_ACCESS);
+}
+#endif
+
+#ifdef HAS_MEM_PAGING
+static int flask_mem_paging(struct domain *d)
+{
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_PAGING);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static int flask_mem_sharing(struct domain *d)
+{
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_SHARING);
 }
+#endif
 
 #if defined(HAS_PASSTHROUGH) && defined(HAS_PCI)
 static int flask_get_device_group(uint32_t machine_bdf)
@@ -1579,7 +1595,18 @@ static struct xsm_operations flask_ops = {
     .get_vnumainfo = flask_get_vnumainfo,
 
     .vm_event_control = flask_vm_event_control,
-    .vm_event_op = flask_vm_event_op,
+
+#ifdef HAS_MEM_ACCESS
+    .mem_access = flask_mem_access,
+#endif
+
+#ifdef HAS_MEM_PAGING
+    .mem_paging = flask_mem_paging,
+#endif
+
+#ifdef HAS_MEM_SHARING
+    .mem_sharing = flask_mem_sharing,
+#endif
 
 #ifdef CONFIG_COMPAT
     .do_compat_op = compat_flask_op,
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 9a9d1c5..af4a6ae 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -223,6 +223,12 @@ class domain2
 # XEN_DOMCTL_monitor_op
 # XEN_DOMCTL_vm_event_op
     vm_event
+# XENMEM_access_op
+    mem_access
+# XENMEM_paging_op
+    mem_paging
+# XENMEM_sharing_op
+    mem_sharing
 }
 
 # Similar to class domain, but primarily contains domctls related to HVM domains
-- 
2.1.4

Thread overview: 8+ messages
2015-04-09 14:32 [PATCH V9 0/6] xen: Clean-up of mem_event subsystem Tamas K Lengyel
2015-04-09 14:32 ` [PATCH V9 1/6] xen: Introduce monitor_op domctl Tamas K Lengyel
2015-04-09 14:32 ` [PATCH V9 2/6] xen/vm_event: Deprecate VM_EVENT_FLAG_DUMMY flag Tamas K Lengyel
2015-04-09 14:32 ` [PATCH V9 3/6] xen/vm_event: Decouple vm_event and mem_access Tamas K Lengyel
2015-04-09 14:32 ` [PATCH V9 4/6] xen/vm_event: Relocate memop checks Tamas K Lengyel
2015-04-09 14:32 ` [PATCH V9 5/6] xen/xsm: Split vm_event_op into three separate labels Tamas K Lengyel [this message]
2015-04-09 14:32 ` [PATCH V9 6/6] xen/vm_event: Add RESUME option to vm_event_op domctl Tamas K Lengyel
2015-04-16  8:53 ` [PATCH V9 0/6] xen: Clean-up of mem_event subsystem Tim Deegan
