From: Xenia Ragiadakou <burzalodowa@gmail.com>
To: xen-devel@lists.xenproject.org
Cc: "Jun Nakajima" <jun.nakajima@intel.com>,
	"Kevin Tian" <kevin.tian@intel.com>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>, "Wei Liu" <wl@xen.org>
Subject: [PATCH 3/4] x86/vmx: replace enum vmx_msr_intercept_type with the msr access flags
Date: Mon, 27 Feb 2023 09:56:51 +0200
Message-ID: <20230227075652.3782973-4-burzalodowa@gmail.com>
In-Reply-To: <20230227075652.3782973-1-burzalodowa@gmail.com>

Replace enum vmx_msr_intercept_type with the MSR access flags defined in
hvm.h, so that the functions {svm,vmx}_{set,clear}_msr_intercept() share
the same prototype.

No functional change intended.

Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
---
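Note for reviewers (below the fold, so not part of the commit message):
the MSR_R/MSR_W/MSR_RW flags are assumed here to be the hvm.h definitions
introduced earlier in this series. The sketch below only illustrates the
shape this patch relies on, with values matching the enum being removed
(VMX_MSR_R = 1, VMX_MSR_W = 2); it is not the authoritative header:

  #define MSR_R   (1u << 0)          /* read access / read intercept   */
  #define MSR_W   (1u << 1)          /* write access / write intercept */
  #define MSR_RW  (MSR_R | MSR_W)

Since the flag values line up with the old enumerators, call sites such
as vmx_clear_msr_intercept(v, MSR_EFER, MSR_R) keep bit-identical
behaviour.
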
 xen/arch/x86/cpu/vpmu_intel.c           | 24 +++++++-------
 xen/arch/x86/hvm/vmx/vmcs.c             | 38 ++++++++++-----------
 xen/arch/x86/hvm/vmx/vmx.c              | 44 ++++++++++++-------------
 xen/arch/x86/include/asm/hvm/vmx/vmcs.h | 14 ++------
 4 files changed, 54 insertions(+), 66 deletions(-)
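
For context when reading the vmcs.c hunks below: the helpers only touch
MSRs in the two architecturally defined bitmap ranges. A minimal sketch
of that range check (hypothetical helper name, mirroring the logic in
the hunks):

  static bool msr_in_bitmap_range(unsigned int msr)
  {
      /* 0x00000000-0x00001fff index read_low/write_low. */
      /* 0xc0000000-0xc0001fff index read_high/write_high,
       * after masking the MSR index with 0x1fff. */
      return msr <= 0x1fff ||
             (msr >= 0xc0000000 && msr <= 0xc0001fff);
  }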

diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index bcfa187a14..bd91c79a36 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -230,22 +230,22 @@ static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
 
     /* Allow Read/Write PMU Counters MSR Directly. */
     for ( i = 0; i < fixed_pmc_cnt; i++ )
-        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
     {
-        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
 
         if ( full_width_write )
-            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, VMX_MSR_RW);
+            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
     }
 
     /* Allow Read PMU Non-global Controls Directly. */
     for ( i = 0; i < arch_pmc_cnt; i++ )
-        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), VMX_MSR_R);
+        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
 
-    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, VMX_MSR_R);
-    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, VMX_MSR_R);
+    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
 }
 
 static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
@@ -253,21 +253,21 @@ static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
     unsigned int i;
 
     for ( i = 0; i < fixed_pmc_cnt; i++ )
-        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
     {
-        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
 
         if ( full_width_write )
-            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, VMX_MSR_RW);
+            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
     }
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
-        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), VMX_MSR_R);
+        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
 
-    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, VMX_MSR_R);
-    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, VMX_MSR_R);
+    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
 }
 
 static inline void __core2_vpmu_save(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index ed71ecfb62..22c12509d5 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -902,8 +902,7 @@ static void vmx_set_host_env(struct vcpu *v)
               (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
 }
 
-void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
-                             enum vmx_msr_intercept_type type)
+void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type)
 {
     struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
     struct domain *d = v->domain;
@@ -917,25 +916,24 @@ void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
 
     if ( msr <= 0x1fff )
     {
-        if ( type & VMX_MSR_R )
+        if ( type & MSR_R )
             clear_bit(msr, msr_bitmap->read_low);
-        if ( type & VMX_MSR_W )
+        if ( type & MSR_W )
             clear_bit(msr, msr_bitmap->write_low);
     }
     else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
     {
         msr &= 0x1fff;
-        if ( type & VMX_MSR_R )
+        if ( type & MSR_R )
             clear_bit(msr, msr_bitmap->read_high);
-        if ( type & VMX_MSR_W )
+        if ( type & MSR_W )
             clear_bit(msr, msr_bitmap->write_high);
     }
     else
         ASSERT(!"MSR out of range for interception\n");
 }
 
-void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
-                           enum vmx_msr_intercept_type type)
+void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, int type)
 {
     struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
 
@@ -945,17 +943,17 @@ void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
 
     if ( msr <= 0x1fff )
     {
-        if ( type & VMX_MSR_R )
+        if ( type & MSR_R )
             set_bit(msr, msr_bitmap->read_low);
-        if ( type & VMX_MSR_W )
+        if ( type & MSR_W )
             set_bit(msr, msr_bitmap->write_low);
     }
     else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
     {
         msr &= 0x1fff;
-        if ( type & VMX_MSR_R )
+        if ( type & MSR_R )
             set_bit(msr, msr_bitmap->read_high);
-        if ( type & VMX_MSR_W )
+        if ( type & MSR_W )
             set_bit(msr, msr_bitmap->write_high);
     }
     else
@@ -1162,17 +1160,17 @@ static int construct_vmcs(struct vcpu *v)
         v->arch.hvm.vmx.msr_bitmap = msr_bitmap;
         __vmwrite(MSR_BITMAP, virt_to_maddr(msr_bitmap));
 
-        vmx_clear_msr_intercept(v, MSR_FS_BASE, VMX_MSR_RW);
-        vmx_clear_msr_intercept(v, MSR_GS_BASE, VMX_MSR_RW);
-        vmx_clear_msr_intercept(v, MSR_SHADOW_GS_BASE, VMX_MSR_RW);
-        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_CS, VMX_MSR_RW);
-        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_ESP, VMX_MSR_RW);
-        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_EIP, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_FS_BASE, MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_GS_BASE, MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_SHADOW_GS_BASE, MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_CS, MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_ESP, MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_EIP, MSR_RW);
         if ( paging_mode_hap(d) && (!is_iommu_enabled(d) || iommu_snoop) )
-            vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
+            vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, MSR_RW);
         if ( (vmexit_ctl & VM_EXIT_CLEAR_BNDCFGS) &&
              (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) )
-            vmx_clear_msr_intercept(v, MSR_IA32_BNDCFGS, VMX_MSR_RW);
+            vmx_clear_msr_intercept(v, MSR_IA32_BNDCFGS, MSR_RW);
     }
 
     /* I/O access bitmap. */
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 0ec33bcc18..87c47c002c 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -802,7 +802,7 @@ static void cf_check vmx_cpuid_policy_changed(struct vcpu *v)
      */
     if ( cp->feat.ibrsb )
     {
-        vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
 
         rc = vmx_add_guest_msr(v, MSR_SPEC_CTRL, 0);
         if ( rc )
@@ -810,7 +810,7 @@ static void cf_check vmx_cpuid_policy_changed(struct vcpu *v)
     }
     else
     {
-        vmx_set_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
 
         rc = vmx_del_msr(v, MSR_SPEC_CTRL, VMX_MSR_GUEST);
         if ( rc && rc != -ESRCH )
@@ -820,20 +820,20 @@ static void cf_check vmx_cpuid_policy_changed(struct vcpu *v)
 
     /* MSR_PRED_CMD is safe to pass through if the guest knows about it. */
     if ( cp->feat.ibrsb || cp->extd.ibpb )
-        vmx_clear_msr_intercept(v, MSR_PRED_CMD,  VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_PRED_CMD, MSR_RW);
     else
-        vmx_set_msr_intercept(v, MSR_PRED_CMD,  VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_PRED_CMD, MSR_RW);
 
     /* MSR_FLUSH_CMD is safe to pass through if the guest knows about it. */
     if ( cp->feat.l1d_flush )
-        vmx_clear_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_FLUSH_CMD, MSR_RW);
     else
-        vmx_set_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_FLUSH_CMD, MSR_RW);
 
     if ( cp->feat.pks )
-        vmx_clear_msr_intercept(v, MSR_PKRS, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_PKRS, MSR_RW);
     else
-        vmx_set_msr_intercept(v, MSR_PKRS, VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_PKRS, MSR_RW);
 
  out:
     vmx_vmcs_exit(v);
@@ -1429,7 +1429,7 @@ static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
 
             vmx_get_guest_pat(v, pat);
             vmx_set_guest_pat(v, uc_pat);
-            vmx_set_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
+            vmx_set_msr_intercept(v, MSR_IA32_CR_PAT, MSR_RW);
 
             wbinvd();               /* flush possibly polluted cache */
             hvm_asid_flush_vcpu(v); /* invalidate memory type cached in TLB */
@@ -1440,7 +1440,7 @@ static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
             v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
             vmx_set_guest_pat(v, *pat);
             if ( !is_iommu_enabled(v->domain) || iommu_snoop )
-                vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
+                vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, MSR_RW);
             hvm_asid_flush_vcpu(v); /* no need to flush cache */
         }
     }
@@ -1906,9 +1906,9 @@ static void cf_check vmx_update_guest_efer(struct vcpu *v)
      * into hardware, clear the read intercept to avoid unnecessary VMExits.
      */
     if ( guest_efer == v->arch.hvm.guest_efer )
-        vmx_clear_msr_intercept(v, MSR_EFER, VMX_MSR_R);
+        vmx_clear_msr_intercept(v, MSR_EFER, MSR_R);
     else
-        vmx_set_msr_intercept(v, MSR_EFER, VMX_MSR_R);
+        vmx_set_msr_intercept(v, MSR_EFER, MSR_R);
 }
 
 void nvmx_enqueue_n2_exceptions(struct vcpu *v, 
@@ -2335,7 +2335,7 @@ static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
     struct vcpu *v;
 
     for_each_vcpu ( d, v )
-        vmx_set_msr_intercept(v, msr, VMX_MSR_W);
+        vmx_set_msr_intercept(v, msr, MSR_W);
 }
 
 static void cf_check vmx_vcpu_update_eptp(struct vcpu *v)
@@ -3502,17 +3502,17 @@ void cf_check vmx_vlapic_msr_changed(struct vcpu *v)
             {
                 for ( msr = MSR_X2APIC_FIRST;
                       msr <= MSR_X2APIC_LAST; msr++ )
-                    vmx_clear_msr_intercept(v, msr, VMX_MSR_R);
+                    vmx_clear_msr_intercept(v, msr, MSR_R);
 
-                vmx_set_msr_intercept(v, MSR_X2APIC_PPR, VMX_MSR_R);
-                vmx_set_msr_intercept(v, MSR_X2APIC_TMICT, VMX_MSR_R);
-                vmx_set_msr_intercept(v, MSR_X2APIC_TMCCT, VMX_MSR_R);
+                vmx_set_msr_intercept(v, MSR_X2APIC_PPR, MSR_R);
+                vmx_set_msr_intercept(v, MSR_X2APIC_TMICT, MSR_R);
+                vmx_set_msr_intercept(v, MSR_X2APIC_TMCCT, MSR_R);
             }
             if ( cpu_has_vmx_virtual_intr_delivery )
             {
-                vmx_clear_msr_intercept(v, MSR_X2APIC_TPR, VMX_MSR_W);
-                vmx_clear_msr_intercept(v, MSR_X2APIC_EOI, VMX_MSR_W);
-                vmx_clear_msr_intercept(v, MSR_X2APIC_SELF, VMX_MSR_W);
+                vmx_clear_msr_intercept(v, MSR_X2APIC_TPR, MSR_W);
+                vmx_clear_msr_intercept(v, MSR_X2APIC_EOI, MSR_W);
+                vmx_clear_msr_intercept(v, MSR_X2APIC_SELF, MSR_W);
             }
         }
         else
@@ -3523,7 +3523,7 @@ void cf_check vmx_vlapic_msr_changed(struct vcpu *v)
            SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE) )
         for ( msr = MSR_X2APIC_FIRST;
               msr <= MSR_X2APIC_LAST; msr++ )
-            vmx_set_msr_intercept(v, msr, VMX_MSR_RW);
+            vmx_set_msr_intercept(v, msr, MSR_RW);
 
     vmx_update_secondary_exec_control(v);
     vmx_vmcs_exit(v);
@@ -3659,7 +3659,7 @@ static int cf_check vmx_msr_write_intercept(
                         return X86EMUL_OKAY;
                     }
 
-                    vmx_clear_msr_intercept(v, lbr->base + i, VMX_MSR_RW);
+                    vmx_clear_msr_intercept(v, lbr->base + i, MSR_RW);
                 }
             }
 
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
index 0a84e74478..e08c506be5 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
@@ -644,18 +644,8 @@ static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr,
     return 0;
 }
 
-
-/* MSR intercept bitmap infrastructure. */
-enum vmx_msr_intercept_type {
-    VMX_MSR_R  = 1,
-    VMX_MSR_W  = 2,
-    VMX_MSR_RW = VMX_MSR_R | VMX_MSR_W,
-};
-
-void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
-                             enum vmx_msr_intercept_type type);
-void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
-                           enum vmx_msr_intercept_type type);
+void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type);
+void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, int type);
 void vmx_vmcs_switch(paddr_t from, paddr_t to);
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
 void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
-- 
2.37.2


