From mboxrd@z Thu Jan 1 00:00:00 1970
From: Boris Ostrovsky
Subject: [PATCH v10 04/20] x86/VPMU: Make vpmu macros a bit more efficient
Date: Wed, 3 Sep 2014 23:41:04 -0400
Message-ID: <1409802080-6160-5-git-send-email-boris.ostrovsky@oracle.com>
References: <1409802080-6160-1-git-send-email-boris.ostrovsky@oracle.com>
In-Reply-To: <1409802080-6160-1-git-send-email-boris.ostrovsky@oracle.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: jbeulich@suse.com, kevin.tian@intel.com, suravee.suthikulpanit@amd.com,
    eddie.dong@intel.com, Aravind.Gopalakrishnan@amd.com
Cc: keir@xen.org, andrew.cooper3@citrix.com, tim@xen.org,
    xen-devel@lists.xen.org, jun.nakajima@intel.com, boris.ostrovsky@oracle.com
Sender: xen-devel-bounces@lists.xen.org
Errors-To: xen-devel-bounces@lists.xen.org
List-Id: xen-devel@lists.xenproject.org

Introduce vpmu_are_all_set that allows testing multiple bits at once.
Convert macros into inlines for better compiler checking.

Signed-off-by: Boris Ostrovsky
Acked-by: Kevin Tian
Reviewed-by: Konrad Rzeszutek Wilk
Reviewed-by: Dietmar Hahn
Tested-by: Dietmar Hahn
---
 xen/arch/x86/hvm/vmx/vpmu_core2.c |  5 +----
 xen/arch/x86/hvm/vpmu.c           |  3 +--
 xen/include/asm-x86/hvm/vpmu.h    | 25 +++++++++++++++++++++----
 3 files changed, 23 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 2c1fa0e..9496ae8 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -326,10 +326,7 @@ static int core2_vpmu_save(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
-        return 0;
-
-    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
+    if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
         return 0;
 
     __core2_vpmu_save(v);
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 451b346..7929290 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -145,8 +145,7 @@ void vpmu_save(struct vcpu *v)
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     int pcpu = smp_processor_id();
 
-    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
+    if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED) )
         return;
 
     vpmu->last_pcpu = pcpu;
diff --git a/xen/include/asm-x86/hvm/vpmu.h b/xen/include/asm-x86/hvm/vpmu.h
index 9a5ac01..40a6e57 100644
--- a/xen/include/asm-x86/hvm/vpmu.h
+++ b/xen/include/asm-x86/hvm/vpmu.h
@@ -82,10 +82,27 @@ struct vpmu_struct {
 #define VPMU_CPU_HAS_DS                     0x100 /* Has Debug Store */
 #define VPMU_CPU_HAS_BTS                    0x200 /* Has Branch Trace Store */
 
-#define vpmu_set(_vpmu, _x)    ((_vpmu)->flags |= (_x))
-#define vpmu_reset(_vpmu, _x)  ((_vpmu)->flags &= ~(_x))
-#define vpmu_is_set(_vpmu, _x) ((_vpmu)->flags & (_x))
-#define vpmu_clear(_vpmu)      ((_vpmu)->flags = 0)
+static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask)
+{
+    vpmu->flags |= mask;
+}
+static inline void vpmu_reset(struct vpmu_struct *vpmu, const u32 mask)
+{
+    vpmu->flags &= ~mask;
+}
+static inline void vpmu_clear(struct vpmu_struct *vpmu)
+{
+    vpmu->flags = 0;
+}
+static inline bool_t vpmu_is_set(const struct vpmu_struct *vpmu, const u32 mask)
+{
+    return !!(vpmu->flags & mask);
+}
+static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu,
+                                      const u32 mask)
+{
+    return !!((vpmu->flags & mask) == mask);
+}
 
 int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, uint64_t supported);
 int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
-- 
1.8.1.4
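The behavioural point of the change is that vpmu_is_set() is an "any bit of the
mask is set" test, while the new vpmu_are_all_set() is an "every bit of the mask
is set" test, which is what the converted call sites need once two flags are
OR'd into a single mask. Below is a minimal standalone sketch (not part of the
patch; the typedefs, flag values and main() harness are simplified stand-ins for
the Xen definitions) that illustrates the difference:

/* Standalone illustration only -- not part of the patch. The typedefs,
 * struct and flag values below are simplified stand-ins for the Xen
 * definitions in xen/include/asm-x86/hvm/vpmu.h. */
#include <stdint.h>
#include <stdio.h>

typedef int bool_t;
typedef uint32_t u32;

struct vpmu_struct {
    u32 flags;
};

#define VPMU_CONTEXT_ALLOCATED 0x1
#define VPMU_CONTEXT_LOADED    0x2

/* "Any bit of mask set" -- same semantics as the old vpmu_is_set macro. */
static inline bool_t vpmu_is_set(const struct vpmu_struct *vpmu, const u32 mask)
{
    return !!(vpmu->flags & mask);
}

/* "Every bit of mask set" -- the helper introduced by this patch. */
static inline bool_t vpmu_are_all_set(const struct vpmu_struct *vpmu,
                                      const u32 mask)
{
    return !!((vpmu->flags & mask) == mask);
}

int main(void)
{
    struct vpmu_struct vpmu = { .flags = VPMU_CONTEXT_ALLOCATED };
    const u32 both = VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED;

    /* Only ALLOCATED is set, so the "any" test passes while the "all"
     * test does not -- the distinction the converted call sites rely on. */
    printf("vpmu_is_set(both)      = %d\n", vpmu_is_set(&vpmu, both));      /* 1 */
    printf("vpmu_are_all_set(both) = %d\n", vpmu_are_all_set(&vpmu, both)); /* 0 */

    return 0;
}

vpmu_save(), for example, must only run when the context is both allocated and
loaded, so replacing its two vpmu_is_set() calls with one vpmu_are_all_set()
call preserves the original logic while testing the flags word only once.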