From: "Jan Beulich"
Subject: [PATCH 13/17] x86/PV: split out dealing with MSRs from privileged instruction handling
Date: Thu, 08 Sep 2016 07:18:08 -0600
Message-ID: <57D18130020000780010D1DA@prv-mh.provo.novell.com>
In-Reply-To: <57D17C78020000780010D127@prv-mh.provo.novell.com>
References: <57D17C78020000780010D127@prv-mh.provo.novell.com>
To: xen-devel
Cc: Andrew Cooper

This is in preparation for using the generic emulator here.

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2373,6 +2373,332 @@ static inline uint64_t guest_misc_enable
     return val;
 }
 
+static inline bool is_cpufreq_controller(const struct domain *d)
+{
+    return ((cpufreq_controller == FREQCTL_dom0_kernel) &&
+            is_hardware_domain(d));
+}
+
+static int priv_op_read_msr(unsigned int reg, uint64_t *val,
+                            struct x86_emulate_ctxt *ctxt)
+{
+    const struct vcpu *curr = current;
+    const struct domain *currd = curr->domain;
+    bool vpmu_msr = false;
+
+    switch ( reg )
+    {
+        int rc;
+
+    case MSR_FS_BASE:
+        if ( is_pv_32bit_domain(currd) )
+            break;
+        *val = cpu_has_fsgsbase ? __rdfsbase() : curr->arch.pv_vcpu.fs_base;
+        return X86EMUL_OKAY;
+
+    case MSR_GS_BASE:
+        if ( is_pv_32bit_domain(currd) )
+            break;
+        *val = cpu_has_fsgsbase ? __rdgsbase()
+                                : curr->arch.pv_vcpu.gs_base_kernel;
+        return X86EMUL_OKAY;
+
+    case MSR_SHADOW_GS_BASE:
+        if ( is_pv_32bit_domain(currd) )
+            break;
+        *val = curr->arch.pv_vcpu.gs_base_user;
+        return X86EMUL_OKAY;
+
+    case MSR_K7_FID_VID_CTL:
+    case MSR_K7_FID_VID_STATUS:
+    case MSR_K8_PSTATE_LIMIT:
+    case MSR_K8_PSTATE_CTRL:
+    case MSR_K8_PSTATE_STATUS:
+    case MSR_K8_PSTATE0:
+    case MSR_K8_PSTATE1:
+    case MSR_K8_PSTATE2:
+    case MSR_K8_PSTATE3:
+    case MSR_K8_PSTATE4:
+    case MSR_K8_PSTATE5:
+    case MSR_K8_PSTATE6:
+    case MSR_K8_PSTATE7:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
+            break;
+        if ( unlikely(is_cpufreq_controller(currd)) )
+            goto normal;
+        *val = 0;
+        return X86EMUL_OKAY;
+
+    case MSR_IA32_UCODE_REV:
+        BUILD_BUG_ON(MSR_IA32_UCODE_REV != MSR_AMD_PATCHLEVEL);
+        if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+        {
+            if ( wrmsr_safe(MSR_IA32_UCODE_REV, 0) )
+                break;
+            sync_core();
+        }
+        goto normal;
+
+    case MSR_IA32_MISC_ENABLE:
+        if ( rdmsr_safe(reg, *val) )
+            break;
+        *val = guest_misc_enable(*val);
+        return X86EMUL_OKAY;
+
+    case MSR_AMD64_DR0_ADDRESS_MASK:
+        if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
+            break;
+        *val = curr->arch.pv_vcpu.dr_mask[0];
+        return X86EMUL_OKAY;
+
+    case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+        if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
+            break;
+        *val = curr->arch.pv_vcpu.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+        return X86EMUL_OKAY;
+
+    case MSR_IA32_PERF_CAPABILITIES:
+        /* No extra capabilities are supported. */
+        *val = 0;
+        return X86EMUL_OKAY;
+
+    case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+    case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
+    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+    case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+        if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+        {
+            vpmu_msr = true;
+            /* fall through */
+    case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
+    case MSR_K7_EVNTSEL0...MSR_K7_PERFCTR3:
+            if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
+            {
+                /* Don't leak PMU MSRs to unprivileged domains. */
+                if ( (vpmu_mode & XENPMU_MODE_ALL) &&
+                     !is_hardware_domain(currd) )
+                    *val = 0;
+                else if ( vpmu_do_rdmsr(reg, val) )
+                    break;
+                return X86EMUL_OKAY;
+            }
+        }
+        /* fall through */
+    default:
+        if ( rdmsr_hypervisor_regs(reg, val) )
+            return X86EMUL_OKAY;
+
+        rc = vmce_rdmsr(reg, val);
+        if ( rc < 0 )
+            break;
+        if ( rc )
+            return X86EMUL_OKAY;
+        /* fall through */
+    case MSR_EFER:
+ normal:
+        /* Everyone can read the MSR space. */
+        /* gdprintk(XENLOG_WARNING, "Domain attempted RDMSR %08x\n", reg); */
+        if ( rdmsr_safe(reg, *val) )
+            break;
+        return X86EMUL_OKAY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
+#include "x86_64/mmconfig.h"
+
+static int priv_op_write_msr(unsigned int reg, uint64_t val,
+                             struct x86_emulate_ctxt *ctxt)
+{
+    struct vcpu *curr = current;
+    const struct domain *currd = curr->domain;
+    bool vpmu_msr = false;
+
+    switch ( reg )
+    {
+        uint64_t temp;
+        int rc;
+
+    case MSR_FS_BASE:
+        if ( is_pv_32bit_domain(currd) )
+            break;
+        wrfsbase(val);
+        curr->arch.pv_vcpu.fs_base = val;
+        return X86EMUL_OKAY;
+
+    case MSR_GS_BASE:
+        if ( is_pv_32bit_domain(currd) )
+            break;
+        wrgsbase(val);
+        curr->arch.pv_vcpu.gs_base_kernel = val;
+        return X86EMUL_OKAY;
+
+    case MSR_SHADOW_GS_BASE:
+        if ( is_pv_32bit_domain(currd) ||
+             wrmsr_safe(MSR_SHADOW_GS_BASE, val) )
+            break;
+        curr->arch.pv_vcpu.gs_base_user = val;
+        return X86EMUL_OKAY;
+
+    case MSR_K7_FID_VID_STATUS:
+    case MSR_K7_FID_VID_CTL:
+    case MSR_K8_PSTATE_LIMIT:
+    case MSR_K8_PSTATE_CTRL:
+    case MSR_K8_PSTATE_STATUS:
+    case MSR_K8_PSTATE0:
+    case MSR_K8_PSTATE1:
+    case MSR_K8_PSTATE2:
+    case MSR_K8_PSTATE3:
+    case MSR_K8_PSTATE4:
+    case MSR_K8_PSTATE5:
+    case MSR_K8_PSTATE6:
+    case MSR_K8_PSTATE7:
+    case MSR_K8_HWCR:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
+            break;
+        if ( likely(!is_cpufreq_controller(currd)) ||
+             wrmsr_safe(reg, val) == 0 )
+            return X86EMUL_OKAY;
+        break;
+
+    case MSR_AMD64_NB_CFG:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
+             boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
+            break;
+        if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
+            return X86EMUL_OKAY;
+        if ( (rdmsr_safe(MSR_AMD64_NB_CFG, temp) != 0) ||
+             ((val ^ temp) & ~(1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )
+            goto invalid;
+        if ( wrmsr_safe(MSR_AMD64_NB_CFG, val) == 0 )
+            return X86EMUL_OKAY;
+        break;
+
+    case MSR_FAM10H_MMIO_CONF_BASE:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
+             boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
+            break;
+        if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
+            return X86EMUL_OKAY;
+        if ( rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, temp) != 0 )
+            break;
+        if ( (pci_probe & PCI_PROBE_MASK) == PCI_PROBE_MMCONF ?
+             temp != val :
+             ((temp ^ val) &
+              ~(FAM10H_MMIO_CONF_ENABLE |
+                (FAM10H_MMIO_CONF_BUSRANGE_MASK <<
+                 FAM10H_MMIO_CONF_BUSRANGE_SHIFT) |
+                ((u64)FAM10H_MMIO_CONF_BASE_MASK <<
+                 FAM10H_MMIO_CONF_BASE_SHIFT))) )
+            goto invalid;
+        if ( wrmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, val) == 0 )
+            return X86EMUL_OKAY;
+        break;
+
+    case MSR_IA32_UCODE_REV:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+            break;
+        if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
+            return X86EMUL_OKAY;
+        if ( rdmsr_safe(reg, temp) )
+            break;
+        if ( val )
+            goto invalid;
+        return X86EMUL_OKAY;
+
+    case MSR_IA32_MISC_ENABLE:
+        if ( rdmsr_safe(reg, temp) )
+            break;
+        if ( val != guest_misc_enable(temp) )
+            goto invalid;
+        return X86EMUL_OKAY;
+
+    case MSR_IA32_MPERF:
+    case MSR_IA32_APERF:
+        if ( (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) &&
+             (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) )
+            break;
+        if ( likely(!is_cpufreq_controller(currd)) ||
+             wrmsr_safe(reg, val) == 0 )
+            return X86EMUL_OKAY;
+        break;
+
+    case MSR_IA32_PERF_CTL:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+            break;
+        if ( likely(!is_cpufreq_controller(currd)) ||
+             wrmsr_safe(reg, val) == 0 )
+            return X86EMUL_OKAY;
+        break;
+
+    case MSR_IA32_THERM_CONTROL:
+    case MSR_IA32_ENERGY_PERF_BIAS:
+        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+            break;
+        if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) ||
+             wrmsr_safe(reg, val) == 0 )
+            return X86EMUL_OKAY;
+        break;
+
+    case MSR_AMD64_DR0_ADDRESS_MASK:
+        if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
+            break;
+        curr->arch.pv_vcpu.dr_mask[0] = val;
+        if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
+            wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, val);
+        return X86EMUL_OKAY;
+
+    case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+        if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
+            break;
+        curr->arch.pv_vcpu.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
+        if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
+            wrmsrl(reg, val);
+        return X86EMUL_OKAY;
+
+    case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+    case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
+    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+    case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+        if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+        {
+            vpmu_msr = true;
+    case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
+    case MSR_K7_EVNTSEL0...MSR_K7_PERFCTR3:
+            if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
+            {
+                if ( (vpmu_mode & XENPMU_MODE_ALL) &&
+                     !is_hardware_domain(currd) )
+                    return X86EMUL_OKAY;
+
+                if ( vpmu_do_wrmsr(reg, val, 0) )
+                    break;
+                return X86EMUL_OKAY;
+            }
+        }
+        /* fall through */
+    default:
+        if ( wrmsr_hypervisor_regs(reg, val) == 1 )
+            return X86EMUL_OKAY;
+
+        rc = vmce_wrmsr(reg, val);
+        if ( rc < 0 )
+            break;
+        if ( rc )
+            return X86EMUL_OKAY;
+
+        if ( (rdmsr_safe(reg, temp) != 0) || (val != temp) )
+ invalid:
+            gdprintk(XENLOG_WARNING,
+                     "Domain attempted WRMSR %08x from 0x%016"PRIx64" to 0x%016"PRIx64"\n",
+                     reg, temp, val);
+        return X86EMUL_OKAY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
 /* Instruction fetch with error handling. */
 #define insn_fetch(type, base, eip, limit)                                  \
 ({  unsigned long _rc, _ptr = (base) + (eip);                               \
@@ -2388,14 +2714,6 @@ static inline uint64_t guest_misc_enable
     }                                                                       \
     (eip) += sizeof(_x); _x; })
 
-static int is_cpufreq_controller(struct domain *d)
-{
-    return ((cpufreq_controller == FREQCTL_dom0_kernel) &&
-            is_hardware_domain(d));
-}
-
-#include "x86_64/mmconfig.h"
-
 static int emulate_privileged_op(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
@@ -2420,7 +2738,6 @@ static int emulate_privileged_op(struct
     char *io_emul_stub = NULL;
     void (*io_emul)(struct cpu_user_regs *);
     uint64_t val;
-    bool_t vpmu_msr;
 
     if ( !read_descriptor(regs->cs, v, &code_base, &code_limit, &ar, 1) )
         goto fail;
@@ -2821,188 +3138,11 @@ static int emulate_privileged_op(struct
             goto fail;
         break;
 
-    case 0x30: /* WRMSR */ {
-        uint32_t eax = regs->eax;
-        uint32_t edx = regs->edx;
-        uint64_t msr_content = ((uint64_t)edx << 32) | eax;
-        vpmu_msr = 0;
-        switch ( regs->_ecx )
-        {
-        case MSR_FS_BASE:
-            if ( is_pv_32bit_domain(currd) )
-                goto fail;
-            wrfsbase(msr_content);
-            v->arch.pv_vcpu.fs_base = msr_content;
-            break;
-        case MSR_GS_BASE:
-            if ( is_pv_32bit_domain(currd) )
-                goto fail;
-            wrgsbase(msr_content);
-            v->arch.pv_vcpu.gs_base_kernel = msr_content;
-            break;
-        case MSR_SHADOW_GS_BASE:
-            if ( is_pv_32bit_domain(currd) )
-                goto fail;
-            if ( wrmsr_safe(MSR_SHADOW_GS_BASE, msr_content) )
-                goto fail;
-            v->arch.pv_vcpu.gs_base_user = msr_content;
-            break;
-        case MSR_K7_FID_VID_STATUS:
-        case MSR_K7_FID_VID_CTL:
-        case MSR_K8_PSTATE_LIMIT:
-        case MSR_K8_PSTATE_CTRL:
-        case MSR_K8_PSTATE_STATUS:
-        case MSR_K8_PSTATE0:
-        case MSR_K8_PSTATE1:
-        case MSR_K8_PSTATE2:
-        case MSR_K8_PSTATE3:
-        case MSR_K8_PSTATE4:
-        case MSR_K8_PSTATE5:
-        case MSR_K8_PSTATE6:
-        case MSR_K8_PSTATE7:
-        case MSR_K8_HWCR:
-            if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
-                goto fail;
-            if ( !is_cpufreq_controller(currd) )
-                break;
-            if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
-                goto fail;
-            break;
-        case MSR_AMD64_NB_CFG:
-            if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
-                 boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
-                goto fail;
-            if ( !is_hardware_domain(currd) || !is_pinned_vcpu(v) )
-                break;
-            if ( (rdmsr_safe(MSR_AMD64_NB_CFG, val) != 0) ||
-                 (eax != (uint32_t)val) ||
-                 ((edx ^ (val >> 32)) & ~(1 << (AMD64_NB_CFG_CF8_EXT_ENABLE_BIT - 32))) )
-                goto invalid;
-            if ( wrmsr_safe(MSR_AMD64_NB_CFG, msr_content) != 0 )
-                goto fail;
-            break;
-        case MSR_FAM10H_MMIO_CONF_BASE:
-            if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
-                 boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
-                goto fail;
-            if ( !is_hardware_domain(currd) || !is_pinned_vcpu(v) )
-                break;
-            if ( (rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, val) != 0) )
-                goto fail;
-            if (
-                 (pci_probe & PCI_PROBE_MASK) == PCI_PROBE_MMCONF ?
-                 val != msr_content :
-                 ((val ^ msr_content) &
-                  ~( FAM10H_MMIO_CONF_ENABLE |
-                    (FAM10H_MMIO_CONF_BUSRANGE_MASK <<
-                     FAM10H_MMIO_CONF_BUSRANGE_SHIFT) |
-                    ((u64)FAM10H_MMIO_CONF_BASE_MASK <<
-                     FAM10H_MMIO_CONF_BASE_SHIFT))) )
-                goto invalid;
-            if ( wrmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, msr_content) != 0 )
-                goto fail;
-            break;
-        case MSR_IA32_UCODE_REV:
-            if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
-                goto fail;
-            if ( !is_hardware_domain(currd) || !is_pinned_vcpu(v) )
-                break;
-            if ( rdmsr_safe(regs->ecx, val) )
-                goto fail;
-            if ( msr_content )
-                goto invalid;
-            break;
-        case MSR_IA32_MISC_ENABLE:
-            if ( rdmsr_safe(regs->ecx, val) )
-                goto fail;
-            val = guest_misc_enable(val);
-            if ( msr_content != val )
-                goto invalid;
-            break;
-        case MSR_IA32_MPERF:
-        case MSR_IA32_APERF:
-            if (( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ) &&
-                ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ) )
-                goto fail;
-            if ( !is_cpufreq_controller(currd) )
-                break;
-            if ( wrmsr_safe(regs->ecx, msr_content ) != 0 )
-                goto fail;
-            break;
-        case MSR_IA32_PERF_CTL:
-            if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
-                goto fail;
-            if ( !is_cpufreq_controller(currd) )
-                break;
-            if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
-                goto fail;
-            break;
-        case MSR_IA32_THERM_CONTROL:
-        case MSR_IA32_ENERGY_PERF_BIAS:
-            if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
-                goto fail;
-            if ( !is_hardware_domain(currd) || !is_pinned_vcpu(v) )
-                break;
-            if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
-                goto fail;
-            break;
-
-        case MSR_AMD64_DR0_ADDRESS_MASK:
-            if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (msr_content >> 32) )
-                goto fail;
-            v->arch.pv_vcpu.dr_mask[0] = msr_content;
-            if ( v->arch.debugreg[7] & DR7_ACTIVE_MASK )
-                wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, msr_content);
-            break;
-        case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
-            if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (msr_content >> 32) )
-                goto fail;
-            v->arch.pv_vcpu.dr_mask
-                [regs->_ecx - MSR_AMD64_DR1_ADDRESS_MASK + 1] = msr_content;
-            if ( v->arch.debugreg[7] & DR7_ACTIVE_MASK )
-                wrmsrl(regs->_ecx, msr_content);
-            break;
-        case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
-        case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
-        case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
-        case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
-            {
-                vpmu_msr = 1;
-        case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
-        case MSR_K7_EVNTSEL0...MSR_K7_PERFCTR3:
-                if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
-                {
-                    if ( (vpmu_mode & XENPMU_MODE_ALL) &&
-                         !is_hardware_domain(v->domain) )
-                        break;
-
-                    if ( vpmu_do_wrmsr(regs->ecx, msr_content, 0) )
-                        goto fail;
-                    break;
-                }
-            }
-            /*FALLTHROUGH*/
-
-        default:
-            if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
-                break;
-
-            rc = vmce_wrmsr(regs->ecx, msr_content);
-            if ( rc < 0 )
-                goto fail;
-            if ( rc )
-                break;
-
-            if ( (rdmsr_safe(regs->ecx, val) != 0) || (msr_content != val) )
-        invalid:
-                gdprintk(XENLOG_WARNING, "Domain attempted WRMSR %p from "
-                         "0x%016"PRIx64" to 0x%016"PRIx64".\n",
-                         _p(regs->ecx), val, msr_content);
-            break;
-        }
+    case 0x30: /* WRMSR */
+        if ( priv_op_write_msr(regs->_ecx, (regs->rdx << 32) | regs->_eax,
+                               NULL) != X86EMUL_OKAY )
+            goto fail;
         break;
-    }
 
     case 0x31: /* RDTSC */
         if ( (v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_TSD) &&
@@ -3018,130 +3158,11 @@ static int emulate_privileged_op(struct
         break;
 
     case 0x32: /* RDMSR */
-        vpmu_msr = 0;
-        switch ( regs->_ecx )
-        {
-        case MSR_FS_BASE:
-            if ( is_pv_32bit_domain(currd) )
-                goto fail;
-            val = cpu_has_fsgsbase ? __rdfsbase() : v->arch.pv_vcpu.fs_base;
-            goto rdmsr_writeback;
-        case MSR_GS_BASE:
-            if ( is_pv_32bit_domain(currd) )
-                goto fail;
-            val = cpu_has_fsgsbase ? __rdgsbase()
-                                   : v->arch.pv_vcpu.gs_base_kernel;
-            goto rdmsr_writeback;
-        case MSR_SHADOW_GS_BASE:
-            if ( is_pv_32bit_domain(currd) )
-                goto fail;
-            val = v->arch.pv_vcpu.gs_base_user;
-            goto rdmsr_writeback;
-        case MSR_K7_FID_VID_CTL:
-        case MSR_K7_FID_VID_STATUS:
-        case MSR_K8_PSTATE_LIMIT:
-        case MSR_K8_PSTATE_CTRL:
-        case MSR_K8_PSTATE_STATUS:
-        case MSR_K8_PSTATE0:
-        case MSR_K8_PSTATE1:
-        case MSR_K8_PSTATE2:
-        case MSR_K8_PSTATE3:
-        case MSR_K8_PSTATE4:
-        case MSR_K8_PSTATE5:
-        case MSR_K8_PSTATE6:
-        case MSR_K8_PSTATE7:
-            if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
-                goto fail;
-            if ( !is_cpufreq_controller(currd) )
-            {
-                regs->eax = regs->edx = 0;
-                break;
-            }
-            goto rdmsr_normal;
-        case MSR_IA32_UCODE_REV:
-            BUILD_BUG_ON(MSR_IA32_UCODE_REV != MSR_AMD_PATCHLEVEL);
-            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
-            {
-                if ( wrmsr_safe(MSR_IA32_UCODE_REV, 0) )
-                    goto fail;
-                sync_core();
-            }
-            goto rdmsr_normal;
-        case MSR_IA32_MISC_ENABLE:
-            if ( rdmsr_safe(regs->ecx, val) )
-                goto fail;
-            val = guest_misc_enable(val);
-            goto rdmsr_writeback;
-
-        case MSR_AMD64_DR0_ADDRESS_MASK:
-            if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
-                goto fail;
-            regs->eax = v->arch.pv_vcpu.dr_mask[0];
-            regs->edx = 0;
-            break;
-        case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
-            if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
-                goto fail;
-            regs->eax = v->arch.pv_vcpu.dr_mask
-                            [regs->_ecx - MSR_AMD64_DR1_ADDRESS_MASK + 1];
-            regs->edx = 0;
-            break;
-        case MSR_IA32_PERF_CAPABILITIES:
-            /* No extra capabilities are supported */
-            regs->eax = regs->edx = 0;
-            break;
-        case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
-        case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
-        case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
-        case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
-            {
-                vpmu_msr = 1;
-        case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
-        case MSR_K7_EVNTSEL0...MSR_K7_PERFCTR3:
-                if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
-                {
-
-                    if ( (vpmu_mode & XENPMU_MODE_ALL) &&
-                         !is_hardware_domain(v->domain) )
-                    {
-                        /* Don't leak PMU MSRs to unprivileged domains */
-                        regs->eax = regs->edx = 0;
-                        break;
-                    }
-
-                    if ( vpmu_do_rdmsr(regs->ecx, &val) )
-                        goto fail;
-
-                    regs->eax = (uint32_t)val;
-                    regs->edx = (uint32_t)(val >> 32);
-                    break;
-                }
-            }
-            /*FALLTHROUGH*/
-
-        default:
-            if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
-                goto rdmsr_writeback;
-
-            rc = vmce_rdmsr(regs->ecx, &val);
-            if ( rc < 0 )
-                goto fail;
-            if ( rc )
-                goto rdmsr_writeback;
-
-        case MSR_EFER:
-        rdmsr_normal:
-            /* Everyone can read the MSR space. */
-            /* gdprintk(XENLOG_WARNING,"Domain attempted RDMSR %p.\n",
-                        _p(regs->ecx));*/
-            if ( rdmsr_safe(regs->ecx, val) )
-                goto fail;
+        if ( priv_op_read_msr(regs->_ecx, &val, NULL) != X86EMUL_OKAY )
+            goto fail;
  rdmsr_writeback:
-            regs->eax = (uint32_t)val;
-            regs->edx = (uint32_t)(val >> 32);
-            break;
-        }
+        regs->eax = (uint32_t)val;
+        regs->edx = (uint32_t)(val >> 32);
         break;
 
     case 0xa2: /* CPUID */
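
A note on where this is headed: the two new helpers deliberately take the
arguments of the generic emulator's MSR hooks (including the so far unused
struct x86_emulate_ctxt * parameter, which is why the call sites above can
simply pass NULL). A later patch in the series can then presumably wire them
up along these lines (a sketch only; the ops-structure name and the elided
hooks are placeholders, not part of this patch):

    static const struct x86_emulate_ops priv_op_ops = {
        /* ... insn fetch, I/O port, CR/DR, etc. hooks elided ... */
        .read_msr  = priv_op_read_msr,   /* hook and helper share a signature */
        .write_msr = priv_op_write_msr,
    };

Returning X86EMUL_UNHANDLEABLE instead of jumping to a local fail label fits
the same model: the decision to inject #GP stays with the caller.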
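
A remark on the vPMU cases, which this patch carries over unchanged from the
old WRMSR/RDMSR code: the case labels nested inside the
if ( ... == X86_VENDOR_INTEL ) block are legal C, and jumping to them from the
switch head is what makes the construct work. On an AMD CPU the switch enters
at the inner labels and skips the Intel-only "vpmu_msr = true" assignment.
A minimal standalone illustration (hypothetical example, not Xen code):

    #include <stdbool.h>
    #include <stdio.h>

    static void classify(unsigned int x, bool intel)
    {
        bool flag = false;              /* plays the role of vpmu_msr */

        switch ( x )
        {
        case 1:                         /* "Intel only" range */
            if ( intel )
            {
                flag = true;
                /* fall through */
        case 2:                         /* "AMD" range enters here */
                printf("x=%u flag=%d\n", x, flag);
            }
            break;
        }
    }

    int main(void)
    {
        classify(1, true);   /* x=1 flag=1 */
        classify(2, false);  /* x=2 flag=0: the assignment was skipped */
        classify(1, false);  /* no output: the vendor check fails */
        return 0;
    }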