From: "Jan Beulich"
Subject: [PATCH v3 1/3] SVM: support data breakpoint extension registers
Date: Mon, 07 Apr 2014 10:38:18 +0100
Message-ID: <53428E2A0200007800006081@nat28.tlf.novell.com>
In-Reply-To: <53428C8B020000780000606B@nat28.tlf.novell.com>
References: <53428C8B020000780000606B@nat28.tlf.novell.com>
To: xen-devel
Cc: Ian Campbell, Ian Jackson, Keir Fraser, Aravind Gopalakrishnan, suravee.suthikulpanit@amd.com
List-Id: xen-devel@lists.xenproject.org

Leveraging the generic MSR save/restore logic introduced a little while
ago.

Signed-off-by: Jan Beulich
Tested-by: Aravind Gopalakrishnan

--- a/tools/libxc/xc_cpufeature.h
+++ b/tools/libxc/xc_cpufeature.h
@@ -125,6 +125,7 @@
 #define X86_FEATURE_NODEID_MSR 19 /* NodeId MSR */
 #define X86_FEATURE_TBM        21 /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT    22 /* topology extensions CPUID leafs */
+#define X86_FEATURE_DBEXT      26 /* data breakpoint extension */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx) */
 #define X86_FEATURE_FSGSBASE   0 /* {RD,WR}{FS,GS}BASE instructions */
--- a/tools/libxc/xc_cpuid_x86.c
+++ b/tools/libxc/xc_cpuid_x86.c
@@ -110,9 +110,10 @@ static void amd_xc_cpuid_policy(
                     bitmaskof(X86_FEATURE_3DNOWPREFETCH) |
                     bitmaskof(X86_FEATURE_OSVW) |
                     bitmaskof(X86_FEATURE_XOP) |
+                    bitmaskof(X86_FEATURE_LWP) |
                     bitmaskof(X86_FEATURE_FMA4) |
                     bitmaskof(X86_FEATURE_TBM) |
-                    bitmaskof(X86_FEATURE_LWP));
+                    bitmaskof(X86_FEATURE_DBEXT));
         regs[3] &= (0x0183f3ff | /* features shared with 0x00000001:EDX */
                     (is_pae ? bitmaskof(X86_FEATURE_NX) : 0) |
                     (is_64bit ? bitmaskof(X86_FEATURE_LM) : 0) |
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3082,6 +3082,9 @@ void hvm_cpuid(unsigned int input, unsig
         /* Only provide PSE36 when guest runs in 32bit PAE or in long mode */
         if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
             *edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
+        /* Hide data breakpoint extensions if the hardware has no support. */
+        if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
+            *ecx &= ~cpufeat_mask(X86_FEATURE_DBEXT);
         break;
 
     case 0x80000008:
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -160,14 +160,28 @@ void svm_intercept_msr(struct vcpu *v, u
 static void svm_save_dr(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    unsigned int flag_dr_dirty = v->arch.hvm_vcpu.flag_dr_dirty;
 
-    if ( !v->arch.hvm_vcpu.flag_dr_dirty )
+    if ( !flag_dr_dirty )
         return;
 
     /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
     v->arch.hvm_vcpu.flag_dr_dirty = 0;
     vmcb_set_dr_intercepts(vmcb, ~0u);
 
+    if ( flag_dr_dirty & 2 )
+    {
+        svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_INTERCEPT_RW);
+        svm_intercept_msr(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_INTERCEPT_RW);
+        svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_RW);
+        svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_RW);
+
+        rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[0]);
+        rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[1]);
+        rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[2]);
+        rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[3]);
+    }
+
     v->arch.debugreg[0] = read_debugreg(0);
     v->arch.debugreg[1] = read_debugreg(1);
     v->arch.debugreg[2] = read_debugreg(2);
@@ -178,12 +192,32 @@ static void svm_save_dr(struct vcpu *v)
 
 static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
 {
+    unsigned int ecx;
+
     if ( v->arch.hvm_vcpu.flag_dr_dirty )
         return;
 
     v->arch.hvm_vcpu.flag_dr_dirty = 1;
     vmcb_set_dr_intercepts(vmcb, 0);
 
+    ASSERT(v == current);
+    hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
+    if ( test_bit(X86_FEATURE_DBEXT & 31, &ecx) )
+    {
+        svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+        svm_intercept_msr(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+        svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+        svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+
+        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[0]);
+        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[1]);
+        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[2]);
+        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm_svm.dr_mask[3]);
+
+        /* Can't use hvm_cpuid() in svm_save_dr(): v != current. */
+        v->arch.hvm_vcpu.flag_dr_dirty |= 2;
+    }
+
     write_debugreg(0, v->arch.debugreg[0]);
     write_debugreg(1, v->arch.debugreg[1]);
     write_debugreg(2, v->arch.debugreg[2]);
@@ -355,6 +389,72 @@ static int svm_load_vmcb_ctxt(struct vcp
     return 0;
 }
 
+static unsigned int __init svm_init_msr(void)
+{
+    return boot_cpu_has(X86_FEATURE_DBEXT) ? 4 : 0;
+}
+
+static void svm_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
+{
+    if ( boot_cpu_has(X86_FEATURE_DBEXT) )
+    {
+        ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[0];
+        if ( ctxt->msr[ctxt->count].val )
+            ctxt->msr[ctxt->count++].index = MSR_AMD64_DR0_ADDRESS_MASK;
+
+        ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[1];
+        if ( ctxt->msr[ctxt->count].val )
+            ctxt->msr[ctxt->count++].index = MSR_AMD64_DR1_ADDRESS_MASK;
+
+        ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[2];
+        if ( ctxt->msr[ctxt->count].val )
+            ctxt->msr[ctxt->count++].index = MSR_AMD64_DR2_ADDRESS_MASK;
+
+        ctxt->msr[ctxt->count].val = v->arch.hvm_svm.dr_mask[3];
+        if ( ctxt->msr[ctxt->count].val )
+            ctxt->msr[ctxt->count++].index = MSR_AMD64_DR3_ADDRESS_MASK;
+    }
+}
+
+static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
+{
+    unsigned int i, idx;
+    int err = 0;
+
+    for ( i = 0; i < ctxt->count; ++i )
+    {
+        switch ( idx = ctxt->msr[i].index )
+        {
+        case MSR_AMD64_DR0_ADDRESS_MASK:
+            if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
+                err = -ENXIO;
+            else if ( ctxt->msr[i].val >> 32 )
+                err = -EDOM;
+            else
+                v->arch.hvm_svm.dr_mask[0] = ctxt->msr[i].val;
+            break;
+
+        case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+            if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
+                err = -ENXIO;
+            else if ( ctxt->msr[i].val >> 32 )
+                err = -EDOM;
+            else
+                v->arch.hvm_svm.dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+                    ctxt->msr[i].val;
+            break;
+
+        default:
+            continue;
+        }
+        if ( err )
+            break;
+        ctxt->msr[i]._rsvd = 1;
+    }
+
+    return err;
+}
+
 static void svm_fpu_enter(struct vcpu *v)
 {
     struct vmcb_struct *n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx;
@@ -1451,6 +1551,8 @@ static int svm_msr_read_intercept(unsign
 
     switch ( msr )
     {
+        unsigned int ecx;
+
     case MSR_IA32_SYSENTER_CS:
         *msr_content = v->arch.hvm_svm.guest_sysenter_cs;
         break;
@@ -1526,6 +1628,21 @@ static int svm_msr_read_intercept(unsign
         vpmu_do_rdmsr(msr, msr_content);
         break;
 
+    case MSR_AMD64_DR0_ADDRESS_MASK:
+        hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
+        if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) )
+            goto gpf;
+        *msr_content = v->arch.hvm_svm.dr_mask[0];
+        break;
+
+    case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+        hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
+        if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) )
+            goto gpf;
+        *msr_content =
+            v->arch.hvm_svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+        break;
+
     case MSR_AMD_OSVW_ID_LENGTH:
     case MSR_AMD_OSVW_STATUS:
         ret = svm_handle_osvw(v, msr, msr_content, 1);
@@ -1594,6 +1711,8 @@ static int svm_msr_write_intercept(unsig
 
     switch ( msr )
     {
+        unsigned int ecx;
+
     case MSR_IA32_SYSENTER_CS:
         vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = msr_content;
         break;
@@ -1669,6 +1788,21 @@ static int svm_msr_write_intercept(unsig
          */
         break;
 
+    case MSR_AMD64_DR0_ADDRESS_MASK:
+        hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
+        if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) || (msr_content >> 32) )
+            goto gpf;
+        v->arch.hvm_svm.dr_mask[0] = msr_content;
+        break;
+
+    case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+        hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
+        if ( !test_bit(X86_FEATURE_DBEXT & 31, &ecx) || (msr_content >> 32) )
+            goto gpf;
+        v->arch.hvm_svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+            msr_content;
+        break;
+
     case MSR_AMD_OSVW_ID_LENGTH:
     case MSR_AMD_OSVW_STATUS:
         ret = svm_handle_osvw(v, msr, &msr_content, 0);
@@ -2022,6 +2156,9 @@ static struct hvm_function_table __initd
     .vcpu_destroy         = svm_vcpu_destroy,
     .save_cpu_ctxt        = svm_save_vmcb_ctxt,
     .load_cpu_ctxt        = svm_load_vmcb_ctxt,
+    .init_msr             = svm_init_msr,
+    .save_msr             = svm_save_msr,
+    .load_msr             = svm_load_msr,
     .get_interrupt_shadow = svm_get_interrupt_shadow,
     .set_interrupt_shadow = svm_set_interrupt_shadow,
     .guest_x86_mode       = svm_guest_x86_mode,
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -134,6 +134,7 @@
 #define X86_FEATURE_NODEID_MSR  (6*32+19) /* NodeId MSR */
 #define X86_FEATURE_TBM         (6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT     (6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_DBEXT       (6*32+26) /* data breakpoint extension */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 7 */
 #define X86_FEATURE_FSGSBASE    (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -515,6 +515,9 @@ struct arch_svm_struct {
     uint64_t guest_lwp_cfg;      /* guest version */
     uint64_t cpu_lwp_cfg;        /* CPU version */
 
+    /* data breakpoint extension MSRs */
+    uint32_t dr_mask[4];
+
     /* OSVW MSRs */
     struct {
         u64 length;
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -140,7 +140,7 @@ struct hvm_vcpu {
 
     int                 xen_port;
 
-    bool_t              flag_dr_dirty;
+    u8                  flag_dr_dirty;
     bool_t              debug_state_latch;
     bool_t              single_step;
 
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -206,6 +206,11 @@
 #define MSR_AMD64_DC_CFG                0xc0011022
 #define AMD64_NB_CFG_CF8_EXT_ENABLE_BIT 46
 
+#define MSR_AMD64_DR0_ADDRESS_MASK      0xc0011027
+#define MSR_AMD64_DR1_ADDRESS_MASK      0xc0011019
+#define MSR_AMD64_DR2_ADDRESS_MASK      0xc001101a
+#define MSR_AMD64_DR3_ADDRESS_MASK      0xc001101b
+
 /* AMD Family10h machine check MSRs */
 #define MSR_F10_MC4_MISC1               0xc0000408
 #define MSR_F10_MC4_MISC2               0xc0000409