From: Xenia Ragiadakou <burzalodowa@gmail.com>
To: xen-devel@lists.xenproject.org
Cc: "Jan Beulich" <jbeulich@suse.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>, "Wei Liu" <wl@xen.org>,
	"Jun Nakajima" <jun.nakajima@intel.com>,
	"Kevin Tian" <kevin.tian@intel.com>
Subject: [PATCH 4/4] x86/hvm: create hvm_funcs for {svm,vmx}_{set,clear}_msr_intercept()
Date: Mon, 27 Feb 2023 09:56:52 +0200
Message-ID: <20230227075652.3782973-5-burzalodowa@gmail.com>
In-Reply-To: <20230227075652.3782973-1-burzalodowa@gmail.com>

Add hvm_funcs hooks for {set,clear}_msr_intercept(), so that the MSR
intercepts can be controlled from common vPMU code.

No functional change intended.

Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
---
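For reference, a minimal sketch of the resulting call path from common vPMU
code (illustrative only; the example_* wrapper below is hypothetical, while
hvm_clear_msr_intercept() and MSR_RW are the names used in the hunks that
follow):

    /* Hypothetical caller in common vPMU code. */
    static void example_allow_counter_access(struct vcpu *v, uint32_t ctr_msr)
    {
        /*
         * Dispatches through hvm_funcs via alternative_vcall(), reaching
         * either vmx_clear_msr_intercept() or svm_clear_msr_intercept()
         * depending on the vendor in use.
         */
        hvm_clear_msr_intercept(v, ctr_msr, MSR_RW);
    }
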
 xen/arch/x86/cpu/vpmu_amd.c             | 10 ++++-----
 xen/arch/x86/cpu/vpmu_intel.c           | 24 ++++++++++-----------
 xen/arch/x86/hvm/svm/svm.c              |  4 ++--
 xen/arch/x86/hvm/vmx/vmcs.c             |  4 ++--
 xen/arch/x86/hvm/vmx/vmx.c              |  2 ++
 xen/arch/x86/include/asm/hvm/hvm.h      | 28 +++++++++++++++++++++++++
 xen/arch/x86/include/asm/hvm/svm/vmcb.h |  4 ++--
 xen/arch/x86/include/asm/hvm/vmx/vmcs.h |  4 ++--
 8 files changed, 55 insertions(+), 25 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index ed6706959e..a306297a69 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -165,9 +165,9 @@ static void amd_vpmu_set_msr_bitmap(struct vcpu *v)
 
     for ( i = 0; i < num_counters; i++ )
     {
-        svm_clear_msr_intercept(v, counters[i], MSR_RW);
-        svm_set_msr_intercept(v, ctrls[i], MSR_W);
-        svm_clear_msr_intercept(v, ctrls[i], MSR_R);
+        hvm_clear_msr_intercept(v, counters[i], MSR_RW);
+        hvm_set_msr_intercept(v, ctrls[i], MSR_W);
+        hvm_clear_msr_intercept(v, ctrls[i], MSR_R);
     }
 
     msr_bitmap_on(vpmu);
@@ -180,8 +180,8 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
 
     for ( i = 0; i < num_counters; i++ )
     {
-        svm_set_msr_intercept(v, counters[i], MSR_RW);
-        svm_set_msr_intercept(v, ctrls[i], MSR_RW);
+        hvm_set_msr_intercept(v, counters[i], MSR_RW);
+        hvm_set_msr_intercept(v, ctrls[i], MSR_RW);
     }
 
     msr_bitmap_off(vpmu);
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index bd91c79a36..46ae38a326 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -230,22 +230,22 @@ static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
 
     /* Allow Read/Write PMU Counters MSR Directly. */
     for ( i = 0; i < fixed_pmc_cnt; i++ )
-        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
+        hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
     {
-        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
+        hvm_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
 
         if ( full_width_write )
-            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
+            hvm_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
     }
 
     /* Allow Read PMU Non-global Controls Directly. */
     for ( i = 0; i < arch_pmc_cnt; i++ )
-        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
+        hvm_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
 
-    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
-    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
+    hvm_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+    hvm_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
 }
 
 static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
@@ -253,21 +253,21 @@ static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
     unsigned int i;
 
     for ( i = 0; i < fixed_pmc_cnt; i++ )
-        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
+        hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
     {
-        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
+        hvm_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
 
         if ( full_width_write )
-            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
+            hvm_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
     }
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
-        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
+        hvm_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
 
-    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
-    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
+    hvm_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+    hvm_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
 }
 
 static inline void __core2_vpmu_save(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index eb144272f4..e54dc08e8a 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -288,7 +288,7 @@ svm_msrbit(unsigned long *msr_bitmap, uint32_t msr)
     return msr_bit;
 }
 
-void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
+void cf_check svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
 {
     unsigned long *msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
 
@@ -303,7 +303,7 @@ void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
         __set_bit(msr * 2 + 1, msr_bit);
 }
 
-void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
+void cf_check svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags)
 {
     unsigned long *msr_bit = svm_msrbit(v->arch.hvm.svm.msrpm, msr);
 
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 22c12509d5..3d0022f392 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -902,7 +902,7 @@ static void vmx_set_host_env(struct vcpu *v)
               (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
 }
 
-void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type)
+void cf_check vmx_clear_msr_intercept(struct vcpu *v, uint32_t msr, int type)
 {
     struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
     struct domain *d = v->domain;
@@ -933,7 +933,7 @@ void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type)
         ASSERT(!"MSR out of range for interception\n");
 }
 
-void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, int type)
+void cf_check vmx_set_msr_intercept(struct vcpu *v, uint32_t msr, int type)
 {
     struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 87c47c002c..d3f2b3add4 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2765,6 +2765,8 @@ static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
     .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
     .update_vlapic_mode = vmx_vlapic_msr_changed,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
+    .set_msr_intercept    = vmx_set_msr_intercept,
+    .clear_msr_intercept  = vmx_clear_msr_intercept,
     .enable_msr_interception = vmx_enable_msr_interception,
     .altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
     .altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index f853e2f3e8..dd9aa42d0a 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -224,6 +224,8 @@ struct hvm_function_table {
                                 paddr_t *L1_gpa, unsigned int *page_order,
                                 uint8_t *p2m_acc, struct npfec npfec);
 
+    void (*set_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);
+    void (*clear_msr_intercept)(struct vcpu *v, uint32_t msr, int flags);
     void (*enable_msr_interception)(struct domain *d, uint32_t msr);
 
     /* Alternate p2m */
@@ -658,6 +660,20 @@ static inline int nhvm_hap_walk_L1_p2m(
         v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
 }
 
+static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
+                                         int flags)
+{
+    if ( hvm_funcs.set_msr_intercept )
+        alternative_vcall(hvm_funcs.set_msr_intercept, v, msr, flags);
+}
+
+static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
+                                           int flags)
+{
+    if ( hvm_funcs.clear_msr_intercept )
+        alternative_vcall(hvm_funcs.clear_msr_intercept, v, msr, flags);
+}
+
 static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
 {
     alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
@@ -916,6 +932,18 @@ static inline void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
     ASSERT_UNREACHABLE();
 }
 
+static inline void hvm_set_msr_intercept(struct vcpu *v, uint32_t msr,
+                                         int flags)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static inline void hvm_clear_msr_intercept(struct vcpu *v, uint32_t msr,
+                                           int flags)
+{
+    ASSERT_UNREACHABLE();
+}
+
 #define is_viridian_domain(d) ((void)(d), false)
 #define is_viridian_vcpu(v) ((void)(v), false)
 #define has_viridian_time_ref_count(d) ((void)(d), false)
diff --git a/xen/arch/x86/include/asm/hvm/svm/vmcb.h b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
index ed2e55e5cf..dbe8ba89cc 100644
--- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
+++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
@@ -585,8 +585,8 @@ void svm_destroy_vmcb(struct vcpu *v);
 
 void setup_vmcb_dump(void);
 
-void svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
-void svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
+void cf_check svm_set_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
+void cf_check svm_clear_msr_intercept(struct vcpu *v, uint32_t msr, int flags);
 #define svm_disable_intercept_for_msr(v, msr) \
     svm_clear_msr_intercept((v), (msr), MSR_RW)
 #define svm_enable_intercept_for_msr(v, msr) \
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
index e08c506be5..f2880c8122 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
@@ -644,8 +644,8 @@ static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr,
     return 0;
 }
 
-void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type);
-void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, int type);
+void cf_check vmx_clear_msr_intercept(struct vcpu *v, uint32_t msr, int type);
+void cf_check vmx_set_msr_intercept(struct vcpu *v, uint32_t msr, int type);
 void vmx_vmcs_switch(paddr_t from, paddr_t to);
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
 void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
-- 
2.37.2



Thread overview: 17+ messages
2023-02-27  7:56 [PATCH 0/4] hvm: add hvm_funcs hooks for msr intercept handling Xenia Ragiadakou
2023-02-27  7:56 ` [PATCH 1/4] x86/vpmu: rename {svm,vmx}_vpmu_initialise to {amd,core2}_vpmu_initialise Xenia Ragiadakou
2023-02-28 13:54   ` Jan Beulich
2023-02-27  7:56 ` [PATCH 2/4] x86/svm: split svm_intercept_msr() into svm_{set,clear}_msr_intercept() Xenia Ragiadakou
2023-02-28 14:20   ` Jan Beulich
2023-02-28 15:05     ` Xenia Ragiadakou
2023-02-28 15:10       ` Jan Beulich
2023-02-28 15:17         ` Xenia Ragiadakou
2023-02-28 16:14           ` Jan Beulich
2023-02-27  7:56 ` [PATCH 3/4] x86/vmx: replace enum vmx_msr_intercept_type with the msr access flags Xenia Ragiadakou
2023-02-28 14:31   ` Jan Beulich
2023-02-28 14:34     ` Jan Beulich
2023-02-28 15:07       ` Xenia Ragiadakou
2023-02-27  7:56 ` Xenia Ragiadakou [this message]
2023-02-28 14:58   ` [PATCH 4/4] x86/hvm: create hvm_funcs for {svm,vmx}_{set,clear}_msr_intercept() Jan Beulich
2023-02-28 20:14     ` Xenia Ragiadakou
2023-03-01  9:14       ` Jan Beulich
