From: Tamas K Lengyel <tamas.lengyel@zentific.com>
Subject: [PATCH V6 12/13] xen/xsm: Split vm_event_op into three separate labels
Date: Wed, 18 Feb 2015 01:11:42 +0100
Message-ID: <1424218303-11331-13-git-send-email-tamas.lengyel@zentific.com>
In-Reply-To: <1424218303-11331-1-git-send-email-tamas.lengyel@zentific.com>
References: <1424218303-11331-1-git-send-email-tamas.lengyel@zentific.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: xen-devel@lists.xen.org
Cc: kevin.tian@intel.com, wei.liu2@citrix.com, ian.campbell@citrix.com,
 steve@zentific.com, stefano.stabellini@eu.citrix.com, jun.nakajima@intel.com,
 tim@xen.org, ian.jackson@eu.citrix.com, eddie.dong@intel.com,
 andres@lagarcavilla.org, jbeulich@suse.com,
 Tamas K Lengyel <tamas.lengyel@zentific.com>, rshriram@cs.ubc.ca,
 keir@xen.org, dgdegra@tycho.nsa.gov, yanghy@cn.fujitsu.com

The XSM label vm_event_op has been used to control the three memops
governing mem_access, mem_paging and mem_sharing. While these subsystems
rely on vm_event, they are not vm_event operations themselves. Thus, in
this patch we introduce a separate label for each of these memops.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/mm/mem_paging.c        |  2 +-
 xen/arch/x86/mm/mem_sharing.c       |  2 +-
 xen/common/mem_access.c             |  2 +-
 xen/include/xsm/dummy.h             | 20 +++++++++++++++++++-
 xen/include/xsm/xsm.h               | 33 ++++++++++++++++++++++++++++++---
 xen/xsm/dummy.c                     | 13 ++++++++++++-
 xen/xsm/flask/hooks.c               | 33 ++++++++++++++++++++++++++++++---
 xen/xsm/flask/policy/access_vectors |  6 ++++++
 8 files changed, 100 insertions(+), 11 deletions(-)
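Note (illustrative sketch, not part of this patch): with the split
vectors, a FLASK policy can grant each memop independently, instead of
the single vm_event_op label covering all three. Assuming hypothetical
domain types monitor_t and domU_t, a policy fragment could read:

    # Hypothetical rule: let monitor_t use all three memops on domU_t,
    # each now gated by its own access vector in class domain2.
    allow monitor_t domU_t:domain2 { mem_access mem_paging mem_sharing };
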
diff --git a/xen/arch/x86/mm/mem_paging.c b/xen/arch/x86/mm/mem_paging.c
index cbdee99..0fa2abd 100644
--- a/xen/arch/x86/mm/mem_paging.c
+++ b/xen/arch/x86/mm/mem_paging.c
@@ -40,7 +40,7 @@ int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
     if ( rc )
         goto out;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_paging_op);
+    rc = xsm_mem_paging(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 2d49bc4..d56ecf9 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1316,7 +1316,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
     if ( !hap_enabled(d) || !d->arch.hvm_domain.mem_sharing_enabled )
         rc = -ENODEV;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_sharing_op);
+    rc = xsm_mem_sharing(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 511c8c5..aa00513 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -48,7 +48,7 @@ int mem_access_memop(unsigned long cmd,
     if ( !p2m_mem_access_sanity_check(d) )
         goto out;
 
-    rc = xsm_vm_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
+    rc = xsm_mem_access(XSM_DM_PRIV, d);
     if ( rc )
         goto out;
 
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index 50ee929..16967ed 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -519,11 +519,29 @@ static XSM_INLINE int xsm_vm_event_control(XSM_DEFAULT_ARG struct domain *d, int
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_vm_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static XSM_INLINE int xsm_mem_access(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
     return xsm_default_action(action, current->domain, d);
 }
+#endif
+
+#ifdef HAS_MEM_PAGING
+static XSM_INLINE int xsm_mem_paging(XSM_DEFAULT_ARG struct domain *d)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static XSM_INLINE int xsm_mem_sharing(XSM_DEFAULT_ARG struct domain *d)
+{
+    XSM_ASSERT_ACTION(XSM_DM_PRIV);
+    return xsm_default_action(action, current->domain, d);
+}
+#endif
 
 #ifdef CONFIG_X86
 static XSM_INLINE int xsm_do_mca(XSM_DEFAULT_VOID)
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index d56a68f..2a88d84 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -142,7 +142,18 @@ struct xsm_operations {
     int (*get_vnumainfo) (struct domain *d);
 
     int (*vm_event_control) (struct domain *d, int mode, int op);
-    int (*vm_event_op) (struct domain *d, int op);
+
+#ifdef HAS_MEM_ACCESS
+    int (*mem_access) (struct domain *d);
+#endif
+
+#ifdef HAS_MEM_PAGING
+    int (*mem_paging) (struct domain *d);
+#endif
+
+#ifdef HAS_MEM_SHARING
+    int (*mem_sharing) (struct domain *d);
+#endif
 
 #ifdef CONFIG_X86
     int (*do_mca) (void);
@@ -546,10 +557,26 @@ static inline int xsm_vm_event_control (xsm_default_t def, struct domain *d, int
     return xsm_ops->vm_event_control(d, mode, op);
 }
 
-static inline int xsm_vm_event_op (xsm_default_t def, struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static inline int xsm_mem_access (xsm_default_t def, struct domain *d)
 {
-    return xsm_ops->vm_event_op(d, op);
+    return xsm_ops->mem_access(d);
 }
+#endif
+
+#ifdef HAS_MEM_PAGING
+static inline int xsm_mem_paging (xsm_default_t def, struct domain *d)
+{
+    return xsm_ops->mem_paging(d);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static inline int xsm_mem_sharing (xsm_default_t def, struct domain *d)
+{
+    return xsm_ops->mem_sharing(d);
+}
+#endif
 
 #ifdef CONFIG_X86
 static inline int xsm_do_mca(xsm_default_t def)
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 6d12d32..3ddb4f6 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -119,7 +119,18 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, map_gmfn_foreign);
 
     set_to_dummy_if_null(ops, vm_event_control);
-    set_to_dummy_if_null(ops, vm_event_op);
+
+#ifdef HAS_MEM_ACCESS
+    set_to_dummy_if_null(ops, mem_access);
+#endif
+
+#ifdef HAS_MEM_PAGING
+    set_to_dummy_if_null(ops, mem_paging);
+#endif
+
+#ifdef HAS_MEM_SHARING
+    set_to_dummy_if_null(ops, mem_sharing);
+#endif
 
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index a65f68c..01d761b 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1142,10 +1142,26 @@ static int flask_vm_event_control(struct domain *d, int mode, int op)
     return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__VM_EVENT);
 }
 
-static int flask_vm_event_op(struct domain *d, int op)
+#ifdef HAS_MEM_ACCESS
+static int flask_mem_access(struct domain *d)
 {
-    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__VM_EVENT);
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_ACCESS);
+}
+#endif
+
+#ifdef HAS_MEM_PAGING
+static int flask_mem_paging(struct domain *d)
+{
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_PAGING);
+}
+#endif
+
+#ifdef HAS_MEM_SHARING
+static int flask_mem_sharing(struct domain *d)
+{
+    return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_SHARING);
 }
+#endif
 
 #if defined(HAS_PASSTHROUGH) && defined(HAS_PCI)
 static int flask_get_device_group(uint32_t machine_bdf)
@@ -1582,7 +1598,18 @@ static struct xsm_operations flask_ops = {
     .get_vnumainfo = flask_get_vnumainfo,
 
     .vm_event_control = flask_vm_event_control,
-    .vm_event_op = flask_vm_event_op,
+
+#ifdef HAS_MEM_ACCESS
+    .mem_access = flask_mem_access,
+#endif
+
+#ifdef HAS_MEM_PAGING
+    .mem_paging = flask_mem_paging,
+#endif
+
+#ifdef HAS_MEM_SHARING
+    .mem_sharing = flask_mem_sharing,
+#endif
 
 #ifdef CONFIG_COMPAT
     .do_compat_op = compat_flask_op,
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 2e231e1..e5197df 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -225,6 +225,12 @@ class domain2
 # XEN_DOMCTL_monitor_op
 # XEN_DOMCTL_vm_event_op
     vm_event
+# XENMEM_access_op
+    mem_access
+# XENMEM_paging_op
+    mem_paging
+# XENMEM_sharing_op
+    mem_sharing
 }
 
 # Similar to class domain, but primarily contains domctls related to HVM domains
-- 
2.1.4