* [PATCH 1/6] x86/xsaves: enable xsaves/xrstors for pv guest
2015-07-02 14:02 [PATCH 0/6] add xsaves/xrstors support Shuai Ruan
@ 2015-07-02 14:02 ` Shuai Ruan
2015-07-02 14:02 ` [PATCH 2/6] x86/xsaves: enable xsaves/xrstors in xen Shuai Ruan
` (5 subsequent siblings)
6 siblings, 0 replies; 16+ messages in thread
From: Shuai Ruan @ 2015-07-02 14:02 UTC (permalink / raw)
To: xen-devel
Cc: kevin.tian, wei.liu2, Ian.Campbell, stefano.stabellini,
jun.nakajima, andrew.cooper3, ian.jackson, eddie.dong, jbeulich,
keir
This patch emulates the xsaves/xrstors instructions and
XSS msr access.
As the xsaves/xrstors instructions and XSS msr access
are required to be executed only in ring0, emulation is
needed when a pv guest uses these instructions.
Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
---
xen/arch/x86/domain.c | 3 ++
xen/arch/x86/traps.c | 85 +++++++++++++++++++++++++++++++++++++++++
xen/arch/x86/x86_64/mm.c | 52 +++++++++++++++++++++++++
xen/arch/x86/xstate.c | 39 +++++++++++++++++++
xen/include/asm-x86/domain.h | 1 +
xen/include/asm-x86/mm.h | 1 +
xen/include/asm-x86/msr-index.h | 2 +
xen/include/asm-x86/xstate.h | 3 ++
8 files changed, 186 insertions(+)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index a8fe046..66f8231 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -426,6 +426,7 @@ int vcpu_initialise(struct vcpu *v)
/* By default, do not emulate */
v->arch.vm_event.emulate_flags = 0;
+ v->arch.msr_ia32_xss = 0;
rc = mapcache_vcpu_init(v);
if ( rc )
@@ -1494,6 +1495,8 @@ static void __context_switch(void)
if ( xcr0 != get_xcr0() && !set_xcr0(xcr0) )
BUG();
}
+ if ( cpu_has_xsaves )
+ wrmsr_safe(MSR_IA32_XSS, n->arch.msr_ia32_xss);
vcpu_restore_fpu_eager(n);
n->arch.ctxt_switch_to(n);
}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index ac62f20..227670b 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2346,6 +2346,80 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
}
break;
+ case 0xc7:
+ {
+ void *xsave_addr;
+ int not_page_aligned = 0;
+ u32 guest_xsaves_size = xstate_ctxt_size_compact(v->arch.xcr0);
+
+ switch ( insn_fetch(u8, code_base, eip, code_limit) )
+ {
+ case 0x2f:/* XSAVES */
+ {
+ if ( (regs->edi & ~PAGE_MASK) + guest_xsaves_size > PAGE_SIZE )
+ {
+ mfn_t mfn_list[2];
+ void *va;
+
+ not_page_aligned = 1;
+ mfn_list[0] = _mfn(do_page_walk_mfn(v, regs->edi));
+ mfn_list[1] = _mfn(do_page_walk_mfn(v,
+ PAGE_ALIGN(regs->edi)));
+ va = __vmap(mfn_list, 1, 2, PAGE_SIZE, PAGE_HYPERVISOR);
+ ASSERT(((unsigned long) va & ~PAGE_MASK) == 0);
+ xsave_addr = (void *)((unsigned long)va +
+ (regs->edi & ~PAGE_MASK));
+ }
+ else
+ xsave_addr = do_page_walk(v, regs->edi);
+
+ if ( !xsave_addr )
+ goto fail;
+
+ xsaves(regs->eax, regs->edx, xsave_addr);
+
+ if ( not_page_aligned )
+ vunmap((void *)((unsigned long)xsave_addr & PAGE_MASK));
+ else
+ unmap_domain_page(xsave_addr);
+ break;
+ }
+ case 0x1f:/* XRSTORS */
+ {
+ if( (regs->edi & ~PAGE_MASK) + guest_xsaves_size > PAGE_SIZE )
+ {
+ mfn_t mfn_list[2];
+ void *va;
+
+ not_page_aligned = 1;
+ mfn_list[0] = _mfn(do_page_walk_mfn(v, regs->edi));
+ mfn_list[1] = _mfn(do_page_walk_mfn(v,
+ PAGE_ALIGN(regs->edi)));
+ va = __vmap(mfn_list, 1, 2, PAGE_SIZE, PAGE_HYPERVISOR);
+ ASSERT(((unsigned long) va & ~PAGE_MASK) == 0);
+ xsave_addr = (void *)((unsigned long)va +
+ (regs->edi & ~PAGE_MASK));
+ }
+ else
+ xsave_addr = do_page_walk(v, regs->edi);
+
+ if ( !xsave_addr )
+ goto fail;
+
+ xrstors(regs->eax, regs->edx, xsave_addr);
+
+ if ( not_page_aligned )
+ vunmap((void *)((unsigned long)xsave_addr & PAGE_MASK));
+ else
+ unmap_domain_page(xsave_addr);
+ break;
+ }
+ default:
+ goto fail;
+ }
+ break;
+ }
+
case 0x06: /* CLTS */
(void)do_fpu_taskswitch(0);
break;
@@ -2638,6 +2712,12 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
wrmsrl(regs->_ecx, msr_content);
break;
+ case MSR_IA32_XSS:
+ if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
+ goto fail;
+ v->arch.msr_ia32_xss = msr_content;
+ break;
+
default:
if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
break;
@@ -2740,6 +2820,11 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
regs->edx = 0;
break;
+ case MSR_IA32_XSS:
+ regs->eax = v->arch.msr_ia32_xss;
+ regs->edx = v->arch.msr_ia32_xss >> 32;
+ break;
+
default:
if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
goto rdmsr_writeback;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 3ef4618..f64aa08 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -48,6 +48,58 @@ l2_pgentry_t __section(".bss.page_aligned") l2_bootmap[L2_PAGETABLE_ENTRIES];
l2_pgentry_t *compat_idle_pg_table_l2;
+unsigned long do_page_walk_mfn(struct vcpu *v, unsigned long addr)
+{
+ unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
+ l4_pgentry_t l4e, *l4t;
+ l3_pgentry_t l3e, *l3t;
+ l2_pgentry_t l2e, *l2t;
+ l1_pgentry_t l1e, *l1t;
+
+ if ( !is_pv_vcpu(v) || !is_canonical_address(addr) )
+ return 0;
+
+ l4t = map_domain_page(mfn);
+ l4e = l4t[l4_table_offset(addr)];
+ unmap_domain_page(l4t);
+ if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
+ return 0;
+
+ l3t = map_l3t_from_l4e(l4e);
+ l3e = l3t[l3_table_offset(addr)];
+ unmap_domain_page(l3t);
+ mfn = l3e_get_pfn(l3e);
+ if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
+ return 0;
+ if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
+ {
+ mfn += PFN_DOWN(addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));
+ goto ret;
+ }
+
+ l2t = map_domain_page(mfn);
+ l2e = l2t[l2_table_offset(addr)];
+ unmap_domain_page(l2t);
+ mfn = l2e_get_pfn(l2e);
+ if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
+ return 0;
+ if ( (l2e_get_flags(l2e) & _PAGE_PSE) )
+ {
+ mfn += PFN_DOWN(addr & ((1UL << L2_PAGETABLE_SHIFT) - 1));
+ goto ret;
+ }
+
+ l1t = map_domain_page(mfn);
+ l1e = l1t[l1_table_offset(addr)];
+ unmap_domain_page(l1t);
+ mfn = l1e_get_pfn(l1e);
+ if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
+ return 0;
+
+ ret:
+ return mfn;
+}
+
void *do_page_walk(struct vcpu *v, unsigned long addr)
{
unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index d5f5e3b..e34eda3 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -65,6 +65,31 @@ uint64_t get_xcr0(void)
return this_cpu(xcr0);
}
+void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr)
+{
+ asm volatile ( ".byte 0x48,0x0f,0xc7,0x2f"
+ : "=m" (*ptr)
+ : "a" (lmask), "d" (hmask), "D" (ptr) );
+}
+
+void xrstors(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr)
+{
+ asm volatile ( "1: .byte 0x48,0x0f,0xc7,0x1f\n"
+ ".section .fixup,\"ax\" \n"
+ "2: mov %5,%%ecx \n"
+ " xor %1,%1 \n"
+ " rep stosb \n"
+ " lea %2,%0 \n"
+ " mov %3,%1 \n"
+ " jmp 1b \n"
+ ".previous \n"
+ _ASM_EXTABLE(1b, 2b)
+ : "+&D" (ptr), "+&a" (lmask)
+ : "m" (*ptr), "g" (lmask), "d" (hmask),
+ "m" (xsave_cntxt_size)
+ : "ecx" );
+}
+
void xsave(struct vcpu *v, uint64_t mask)
{
struct xsave_struct *ptr = v->arch.xsave_area;
@@ -268,6 +293,20 @@ static unsigned int _xstate_ctxt_size(u64 xcr0)
return ebx;
}
+unsigned int xstate_ctxt_size_compact(u64 xcr0)
+{
+ u64 act_xcr0 = get_xcr0();
+ u32 eax, ebx = 0, ecx, edx;
+ bool_t ok = set_xcr0(xcr0);
+
+ ASSERT(ok);
+ cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
+ ok = set_xcr0(act_xcr0);
+ ASSERT(ok);
+
+ return ebx;
+}
+
/* Fastpath for common xstate size requests, avoiding reloads of xcr0. */
unsigned int xstate_ctxt_size(u64 xcr0)
{
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 96bde65..bcea9d4 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -473,6 +473,7 @@ struct arch_vcpu
*/
struct xsave_struct *xsave_area;
uint64_t xcr0;
+ u64 msr_ia32_xss;
/* Accumulated eXtended features mask for using XSAVE/XRESTORE by Xen
* itself, as we can never know whether guest OS depends on content
* preservation whenever guest OS clears one feature flag (for example,
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 8595c38..94a590e 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -524,6 +524,7 @@ void make_cr3(struct vcpu *v, unsigned long mfn);
void update_cr3(struct vcpu *v);
int vcpu_destroy_pagetables(struct vcpu *);
struct trap_bounce *propagate_page_fault(unsigned long addr, u16 error_code);
+unsigned long do_page_walk_mfn(struct vcpu *v, unsigned long addr);
void *do_page_walk(struct vcpu *v, unsigned long addr);
int __sync_local_execstate(void);
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 83f2f70..9564113 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -58,6 +58,8 @@
#define MSR_IA32_BNDCFGS 0x00000D90
+#define MSR_IA32_XSS 0x00000da0
+
#define MSR_MTRRfix64K_00000 0x00000250
#define MSR_MTRRfix16K_80000 0x00000258
#define MSR_MTRRfix16K_A0000 0x00000259
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index 4c690db..59c7156 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -82,6 +82,8 @@ struct __packed __attribute__((aligned (64))) xsave_struct
/* extended state operations */
bool_t __must_check set_xcr0(u64 xfeatures);
uint64_t get_xcr0(void);
+void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr);
+void xrstors(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr);
void xsave(struct vcpu *v, uint64_t mask);
void xrstor(struct vcpu *v, uint64_t mask);
bool_t xsave_enabled(const struct vcpu *v);
@@ -92,6 +94,7 @@ int __must_check handle_xsetbv(u32 index, u64 new_bv);
void xstate_free_save_area(struct vcpu *v);
int xstate_alloc_save_area(struct vcpu *v);
void xstate_init(bool_t bsp);
+unsigned int xstate_ctxt_size_compact(u64 xcr0);
unsigned int xstate_ctxt_size(u64 xcr0);
#endif /* __ASM_XSTATE_H */
--
1.9.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH 2/6] x86/xsaves: enable xsaves/xrstors in xen
2015-07-02 14:02 [PATCH 0/6] add xsaves/xrstors support Shuai Ruan
2015-07-02 14:02 ` [PATCH 1/6] x86/xsaves: enable xsaves/xrstors for pv guest Shuai Ruan
@ 2015-07-02 14:02 ` Shuai Ruan
2015-07-02 14:02 ` [PATCH 3/6] x86/xsaves: enable xsaves/xrstors for hvm guest Shuai Ruan
` (4 subsequent siblings)
6 siblings, 0 replies; 16+ messages in thread
From: Shuai Ruan @ 2015-07-02 14:02 UTC (permalink / raw)
To: xen-devel
Cc: kevin.tian, wei.liu2, Ian.Campbell, stefano.stabellini,
jun.nakajima, andrew.cooper3, ian.jackson, eddie.dong, jbeulich,
keir
This patch uses xsaves/xrstors instead of xsaveopt/xrstor
when performing a task switch in xen, if the feature is
supported in hardware.
Please note that xsaves/xrstors only use the compact format.
Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
---
xen/arch/x86/xstate.c | 83 ++++++++++++++++++++++++++++----------------
xen/include/asm-x86/xstate.h | 3 +-
2 files changed, 55 insertions(+), 31 deletions(-)
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index e34eda3..ff67986 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -14,6 +14,7 @@
#include <asm/xstate.h>
#include <asm/asm_defns.h>
+#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
static bool_t __read_mostly cpu_has_xsaveopt;
static bool_t __read_mostly cpu_has_xsavec;
bool_t __read_mostly cpu_has_xgetbv1;
@@ -102,7 +103,9 @@ void xsave(struct vcpu *v, uint64_t mask)
typeof(ptr->fpu_sse.fip.sel) fcs = ptr->fpu_sse.fip.sel;
typeof(ptr->fpu_sse.fdp.sel) fds = ptr->fpu_sse.fdp.sel;
- if ( cpu_has_xsaveopt )
+ if ( cpu_has_xsaves )
+ xsaves(lmask, hmask, ptr);
+ else if ( cpu_has_xsaveopt )
{
/*
* xsaveopt may not write the FPU portion even when the respective
@@ -155,7 +158,9 @@ void xsave(struct vcpu *v, uint64_t mask)
}
else
{
- if ( cpu_has_xsaveopt )
+ if ( cpu_has_xsaves )
+ xsaves(lmask, hmask, ptr);
+ else if ( cpu_has_xsaveopt )
asm volatile ( ".byte 0x0f,0xae,0x37"
: "=m" (*ptr)
: "a" (lmask), "d" (hmask), "D" (ptr) );
@@ -198,36 +203,54 @@ void xrstor(struct vcpu *v, uint64_t mask)
switch ( __builtin_expect(ptr->fpu_sse.x[FPU_WORD_SIZE_OFFSET], 8) )
{
default:
- asm volatile ( "1: .byte 0x48,0x0f,0xae,0x2f\n"
- ".section .fixup,\"ax\" \n"
- "2: mov %5,%%ecx \n"
- " xor %1,%1 \n"
- " rep stosb \n"
- " lea %2,%0 \n"
- " mov %3,%1 \n"
- " jmp 1b \n"
- ".previous \n"
- _ASM_EXTABLE(1b, 2b)
- : "+&D" (ptr), "+&a" (lmask)
- : "m" (*ptr), "g" (lmask), "d" (hmask),
- "m" (xsave_cntxt_size)
- : "ecx" );
+ if ( cpu_has_xsaves )
+ {
+ if ( !(v->arch.xsave_area->xsave_hdr.xcomp_bv &
+ XSTATE_COMPACTION_ENABLED) )
+ v->arch.xsave_area->xsave_hdr.xcomp_bv = get_xcr0() |
+ XSTATE_COMPACTION_ENABLED;
+ xrstors(lmask, hmask, ptr);
+ }
+ else
+ asm volatile ( "1: .byte 0x48,0x0f,0xae,0x2f\n"
+ ".section .fixup,\"ax\" \n"
+ "2: mov %5,%%ecx \n"
+ " xor %1,%1 \n"
+ " rep stosb \n"
+ " lea %2,%0 \n"
+ " mov %3,%1 \n"
+ " jmp 1b \n"
+ ".previous \n"
+ _ASM_EXTABLE(1b, 2b)
+ : "+&D" (ptr), "+&a" (lmask)
+ : "m" (*ptr), "g" (lmask), "d" (hmask),
+ "m" (xsave_cntxt_size)
+ : "ecx" );
break;
case 4: case 2:
- asm volatile ( "1: .byte 0x0f,0xae,0x2f\n"
- ".section .fixup,\"ax\" \n"
- "2: mov %5,%%ecx \n"
- " xor %1,%1 \n"
- " rep stosb \n"
- " lea %2,%0 \n"
- " mov %3,%1 \n"
- " jmp 1b \n"
- ".previous \n"
- _ASM_EXTABLE(1b, 2b)
- : "+&D" (ptr), "+&a" (lmask)
- : "m" (*ptr), "g" (lmask), "d" (hmask),
- "m" (xsave_cntxt_size)
- : "ecx" );
+ if ( cpu_has_xsaves )
+ {
+ if ( !(v->arch.xsave_area->xsave_hdr.xcomp_bv &
+ XSTATE_COMPACTION_ENABLED) )
+ v->arch.xsave_area->xsave_hdr.xcomp_bv = get_xcr0() |
+ XSTATE_COMPACTION_ENABLED;
+ xrstors(lmask, hmask, ptr);
+ }
+ else
+ asm volatile ( "1: .byte 0x48,0x0f,0xae,0x2f\n"
+ ".section .fixup,\"ax\" \n"
+ "2: mov %5,%%ecx \n"
+ " xor %1,%1 \n"
+ " rep stosb \n"
+ " lea %2,%0 \n"
+ " mov %3,%1 \n"
+ " jmp 1b \n"
+ ".previous \n"
+ _ASM_EXTABLE(1b, 2b)
+ : "+&D" (ptr), "+&a" (lmask)
+ : "m" (*ptr), "g" (lmask), "d" (hmask),
+ "m" (xsave_cntxt_size)
+ : "ecx" );
break;
}
}
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index 59c7156..d03d824 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -72,7 +72,8 @@ struct __packed __attribute__((aligned (64))) xsave_struct
struct {
u64 xstate_bv;
- u64 reserved[7];
+ u64 xcomp_bv;
+ u64 reserved[6];
} xsave_hdr; /* The 64-byte header */
struct { char x[XSTATE_YMM_SIZE]; } ymm; /* YMM */
--
1.9.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH 3/6] x86/xsaves: enable xsaves/xrstors for hvm guest
2015-07-02 14:02 [PATCH 0/6] add xsaves/xrstors support Shuai Ruan
2015-07-02 14:02 ` [PATCH 1/6] x86/xsaves: enable xsaves/xrstors for pv guest Shuai Ruan
2015-07-02 14:02 ` [PATCH 2/6] x86/xsaves: enable xsaves/xrstors in xen Shuai Ruan
@ 2015-07-02 14:02 ` Shuai Ruan
2015-07-03 8:04 ` Chao Peng
2015-07-02 14:02 ` [PATCH 4/6] libxc: expose xsaves/xgetbv/xsavec to " Shuai Ruan
` (3 subsequent siblings)
6 siblings, 1 reply; 16+ messages in thread
From: Shuai Ruan @ 2015-07-02 14:02 UTC (permalink / raw)
To: xen-devel
Cc: kevin.tian, wei.liu2, Ian.Campbell, stefano.stabellini,
jun.nakajima, andrew.cooper3, ian.jackson, eddie.dong, jbeulich,
keir
This patch enables xsaves for hvm guests. It includes:
1. handling xsaves vmcs init and vmexit.
2. adding logic to write/read the XSS msr.
Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
---
xen/arch/x86/hvm/hvm.c | 40 ++++++++++++++++++++++++++++++++++++++
xen/arch/x86/hvm/vmx/vmcs.c | 7 ++++++-
xen/arch/x86/hvm/vmx/vmx.c | 18 +++++++++++++++++
xen/arch/x86/xstate.c | 4 ++--
xen/include/asm-x86/hvm/vmx/vmcs.h | 5 +++++
xen/include/asm-x86/hvm/vmx/vmx.h | 2 ++
xen/include/asm-x86/xstate.h | 1 +
7 files changed, 74 insertions(+), 3 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 535d622..2958e0d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4269,6 +4269,10 @@ void hvm_hypervisor_cpuid_leaf(uint32_t sub_idx,
}
}
+#define XSAVEOPT (1 << 0)
+#define XSAVEC (1 << 1)
+#define XGETBV (1 << 2)
+#define XSAVES (1 << 3)
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
@@ -4355,6 +4359,34 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
*ebx = _eax + _ebx;
}
}
+ if ( count == 1 )
+ {
+ if ( cpu_has_xsaves )
+ {
+ *ebx = XSTATE_AREA_MIN_SIZE;
+ if ( v->arch.xcr0 | v->arch.msr_ia32_xss )
+ for ( sub_leaf = 2; sub_leaf < 63; sub_leaf++ )
+ {
+ if ( !((v->arch.xcr0 | v->arch.msr_ia32_xss)
+ & (1ULL << sub_leaf)) )
+ continue;
+ domain_cpuid(d, input, sub_leaf, &_eax, &_ebx, &_ecx,
+ &_edx);
+ *ebx = *ebx + _eax;
+ }
+ }
+ else
+ {
+ *eax &= ~XSAVES;
+ if ( !cpu_has_xgetbv1 )
+ *eax &= ~XGETBV;
+ if ( !cpu_has_xsavec )
+ *eax &= ~XSAVEC;
+ if ( !cpu_has_xsaveopt )
+ *eax &= ~XSAVEOPT;
+ *ebx = *ecx = *edx = 0;
+ }
+ }
break;
case 0x80000001:
@@ -4454,6 +4486,10 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
*msr_content = v->arch.hvm_vcpu.guest_efer;
break;
+ case MSR_IA32_XSS:
+ *msr_content = v->arch.msr_ia32_xss;
+ break;
+
case MSR_IA32_TSC:
*msr_content = _hvm_rdtsc_intercept();
break;
@@ -4573,6 +4609,10 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
return X86EMUL_EXCEPTION;
break;
+ case MSR_IA32_XSS:
+ v->arch.msr_ia32_xss = msr_content;
+ break;
+
case MSR_IA32_TSC:
hvm_set_guest_tsc(v, msr_content);
break;
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 4c5ceb5..8e61e3f 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -230,7 +230,8 @@ static int vmx_init_vmcs_config(void)
SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_ENABLE_RDTSCP |
SECONDARY_EXEC_PAUSE_LOOP_EXITING |
- SECONDARY_EXEC_ENABLE_INVPCID);
+ SECONDARY_EXEC_ENABLE_INVPCID |
+ SECONDARY_EXEC_XSAVES);
rdmsrl(MSR_IA32_VMX_MISC, _vmx_misc_cap);
if ( _vmx_misc_cap & VMX_MISC_VMWRITE_ALL )
opt |= SECONDARY_EXEC_ENABLE_VMCS_SHADOWING;
@@ -921,6 +922,7 @@ void virtual_vmcs_vmwrite(void *vvmcs, u32 vmcs_encoding, u64 val)
virtual_vmcs_exit(vvmcs);
}
+#define VMX_XSS_EXIT_BITMAP 0
static int construct_vmcs(struct vcpu *v)
{
struct domain *d = v->domain;
@@ -1204,6 +1206,9 @@ static int construct_vmcs(struct vcpu *v)
__vmwrite(GUEST_PAT, guest_pat);
}
+ if ( cpu_has_vmx_xsaves )
+ __vmwrite(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
+
vmx_vmcs_exit(v);
/* PVH: paging mode is updated by arch_set_info_guest(). */
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index fc29b89..7c950b3 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2683,6 +2683,16 @@ static int vmx_handle_apic_write(void)
return vlapic_apicv_write(current, exit_qualification & 0xfff);
}
+static void vmx_handle_xsaves(void)
+{
+ WARN();
+}
+
+static void vmx_handle_xrstors(void)
+{
+ WARN();
+}
+
void vmx_vmexit_handler(struct cpu_user_regs *regs)
{
unsigned long exit_qualification, exit_reason, idtv_info, intr_info = 0;
@@ -3201,6 +3211,14 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
vmx_vcpu_flush_pml_buffer(v);
break;
+ case EXIT_REASON_XSAVES:
+ vmx_handle_xsaves();
+ break;
+
+ case EXIT_REASON_XRSTORS:
+ vmx_handle_xrstors();
+ break;
+
case EXIT_REASON_ACCESS_GDTR_OR_IDTR:
case EXIT_REASON_ACCESS_LDTR_OR_TR:
case EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED:
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index ff67986..73a16b9 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -15,8 +15,8 @@
#include <asm/asm_defns.h>
#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
-static bool_t __read_mostly cpu_has_xsaveopt;
-static bool_t __read_mostly cpu_has_xsavec;
+bool_t __read_mostly cpu_has_xsaveopt;
+bool_t __read_mostly cpu_has_xsavec;
bool_t __read_mostly cpu_has_xgetbv1;
bool_t __read_mostly cpu_has_xsaves;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 1104bda..2687634 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -225,6 +225,7 @@ extern u32 vmx_vmentry_control;
#define SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000
#define SECONDARY_EXEC_ENABLE_VMCS_SHADOWING 0x00004000
#define SECONDARY_EXEC_ENABLE_PML 0x00020000
+#define SECONDARY_EXEC_XSAVES 0x00100000
extern u32 vmx_secondary_exec_control;
#define VMX_EPT_EXEC_ONLY_SUPPORTED 0x00000001
@@ -287,6 +288,8 @@ extern u32 vmx_secondary_exec_control;
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VMCS_SHADOWING)
#define cpu_has_vmx_pml \
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
+#define cpu_has_vmx_xsaves \
+ (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES)
#define VMCS_RID_TYPE_MASK 0x80000000
@@ -356,6 +359,8 @@ enum vmcs_field {
#define EOI_EXIT_BITMAP(n) (EOI_EXIT_BITMAP0 + (n) * 2) /* n = 0...3 */
VMREAD_BITMAP = 0x00002026,
VMWRITE_BITMAP = 0x00002028,
+ XSS_EXIT_BITMAP = 0x0000202c,
+ XSS_EXIT_BITMAP_HIGH = 0x0000202d,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
VMCS_LINK_POINTER = 0x00002800,
GUEST_IA32_DEBUGCTL = 0x00002802,
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 35f804a..b4c5e73 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -187,6 +187,8 @@ static inline unsigned long pi_get_pir(struct pi_desc *pi_desc, int group)
#define EXIT_REASON_APIC_WRITE 56
#define EXIT_REASON_INVPCID 58
#define EXIT_REASON_PML_FULL 62
+#define EXIT_REASON_XSAVES 63
+#define EXIT_REASON_XRSTORS 64
/*
* Interruption-information format
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index d03d824..1357063 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -44,6 +44,7 @@
extern u64 xfeature_mask;
extern bool_t cpu_has_xsaves, cpu_has_xgetbv1;
+extern bool_t cpu_has_xsavec, cpu_has_xsaveopt;
/* extended state save area */
struct __packed __attribute__((aligned (64))) xsave_struct
--
1.9.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* Re: [PATCH 3/6] x86/xsaves: enable xsaves/xrstors for hvm guest
2015-07-02 14:02 ` [PATCH 3/6] x86/xsaves: enable xsaves/xrstors for hvm guest Shuai Ruan
@ 2015-07-03 8:04 ` Chao Peng
0 siblings, 0 replies; 16+ messages in thread
From: Chao Peng @ 2015-07-03 8:04 UTC (permalink / raw)
To: Shuai Ruan
Cc: kevin.tian, wei.liu2, Ian.Campbell, stefano.stabellini,
andrew.cooper3, ian.jackson, xen-devel, jbeulich, eddie.dong,
jun.nakajima, keir
On Thu, Jul 02, 2015 at 10:02:28PM +0800, Shuai Ruan wrote:
> This patch enables xsaves for hvm guest, includes:
> 1.handle xsaves vmcs init and vmexit.
> 2.add logic to write/read the XSS msr.
>
> Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
> ---
> xen/arch/x86/hvm/hvm.c | 40 ++++++++++++++++++++++++++++++++++++++
> xen/arch/x86/hvm/vmx/vmcs.c | 7 ++++++-
> xen/arch/x86/hvm/vmx/vmx.c | 18 +++++++++++++++++
> xen/arch/x86/xstate.c | 4 ++--
> xen/include/asm-x86/hvm/vmx/vmcs.h | 5 +++++
> xen/include/asm-x86/hvm/vmx/vmx.h | 2 ++
> xen/include/asm-x86/xstate.h | 1 +
> 7 files changed, 74 insertions(+), 3 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 535d622..2958e0d 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -4269,6 +4269,10 @@ void hvm_hypervisor_cpuid_leaf(uint32_t sub_idx,
> }
> }
>
> +#define XSAVEOPT (1 << 0)
> +#define XSAVEC (1 << 1)
> +#define XGETBV (1 << 2)
> +#define XSAVES (1 << 3)
Hard tab is used.
> void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
> unsigned int *ecx, unsigned int *edx)
> {
> @@ -4355,6 +4359,34 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
> *ebx = _eax + _ebx;
> }
> }
> + if ( count == 1 )
> + {
> + if ( cpu_has_xsaves )
> + {
> + *ebx = XSTATE_AREA_MIN_SIZE;
> + if ( v->arch.xcr0 | v->arch.msr_ia32_xss )
> + for ( sub_leaf = 2; sub_leaf < 63; sub_leaf++ )
> + {
> + if ( !((v->arch.xcr0 | v->arch.msr_ia32_xss)
> + & (1ULL << sub_leaf)) )
alignment.
> + continue;
> + domain_cpuid(d, input, sub_leaf, &_eax, &_ebx, &_ecx,
> + &_edx);
> + *ebx = *ebx + _eax;
> + }
> + }
> + else
> + {
> + *eax &= ~XSAVES;
> + if ( !cpu_has_xgetbv1 )
> + *eax &= ~XGETBV;
> + if ( !cpu_has_xsavec )
> + *eax &= ~XSAVEC;
> + if ( !cpu_has_xsaveopt )
> + *eax &= ~XSAVEOPT;
I think these features(XGETBV/XSAVEC/XSAVEOPT) should be independent
of XSAVES, e.g. here they should not be enclosed with 'else' statement.
Chao
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH 4/6] libxc: expose xsaves/xgetbv/xsavec to hvm guest
2015-07-02 14:02 [PATCH 0/6] add xsaves/xrstors support Shuai Ruan
` (2 preceding siblings ...)
2015-07-02 14:02 ` [PATCH 3/6] x86/xsaves: enable xsaves/xrstors for hvm guest Shuai Ruan
@ 2015-07-02 14:02 ` Shuai Ruan
2015-07-13 16:22 ` Jan Beulich
2015-07-02 14:02 ` [PATCH 5/6] x86/xsaves: support compact format for hvm save/restore Shuai Ruan
` (2 subsequent siblings)
6 siblings, 1 reply; 16+ messages in thread
From: Shuai Ruan @ 2015-07-02 14:02 UTC (permalink / raw)
To: xen-devel
Cc: kevin.tian, wei.liu2, Ian.Campbell, stefano.stabellini,
jun.nakajima, andrew.cooper3, ian.jackson, eddie.dong, jbeulich,
keir
This patch exposes xsaves/xgetbv/xsavec to hvm guest.
Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
---
tools/libxc/xc_cpuid_x86.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tools/libxc/xc_cpuid_x86.c b/tools/libxc/xc_cpuid_x86.c
index c97f91a..0ed8b68 100644
--- a/tools/libxc/xc_cpuid_x86.c
+++ b/tools/libxc/xc_cpuid_x86.c
@@ -211,6 +211,9 @@ static void intel_xc_cpuid_policy(
}
#define XSAVEOPT (1 << 0)
+#define XSAVEC (1 << 1)
+#define XGETBV (1 << 2)
+#define XSAVES (1 << 3)
/* Configure extended state enumeration leaves (0x0000000D for xsave) */
static void xc_cpuid_config_xsave(
xc_interface *xch, domid_t domid, uint64_t xfeature_mask,
@@ -247,8 +250,7 @@ static void xc_cpuid_config_xsave(
regs[1] = 512 + 64; /* FP/SSE + XSAVE.HEADER */
break;
case 1: /* leaf 1 */
- regs[0] &= XSAVEOPT;
- regs[1] = regs[2] = regs[3] = 0;
+ regs[0] &= (XSAVEOPT | XSAVEC | XGETBV | XSAVES);
break;
case 2 ... 63: /* sub-leaves */
if ( !(xfeature_mask & (1ULL << input[1])) )
@@ -256,8 +258,6 @@ static void xc_cpuid_config_xsave(
regs[0] = regs[1] = regs[2] = regs[3] = 0;
break;
}
- /* Don't touch EAX, EBX. Also cleanup ECX and EDX */
- regs[2] = regs[3] = 0;
break;
}
}
--
1.9.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* Re: [PATCH 4/6] libxc: expose xsaves/xgetbv/xsavec to hvm guest
2015-07-02 14:02 ` [PATCH 4/6] libxc: expose xsaves/xgetbv/xsavec to " Shuai Ruan
@ 2015-07-13 16:22 ` Jan Beulich
0 siblings, 0 replies; 16+ messages in thread
From: Jan Beulich @ 2015-07-13 16:22 UTC (permalink / raw)
To: Shuai Ruan
Cc: kevin.tian, wei.liu2, eddie.dong, stefano.stabellini,
andrew.cooper3, ian.jackson, xen-devel, jun.nakajima, keir,
Ian.Campbell
>>> On 02.07.15 at 16:02, <shuai.ruan@intel.com> wrote:
> This patch exposes xsaves/xgetbv/xsavec to hvm guest.
>
> Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
> ---
> tools/libxc/xc_cpuid_x86.c | 8 ++++----
> 1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/tools/libxc/xc_cpuid_x86.c b/tools/libxc/xc_cpuid_x86.c
> index c97f91a..0ed8b68 100644
> --- a/tools/libxc/xc_cpuid_x86.c
> +++ b/tools/libxc/xc_cpuid_x86.c
> @@ -211,6 +211,9 @@ static void intel_xc_cpuid_policy(
> }
>
> #define XSAVEOPT (1 << 0)
> +#define XSAVEC (1 << 1)
> +#define XGETBV (1 << 2)
This needs a better macro name, as the bit isn't about XGETBV in
general, but the case of it supporting ECX=1 on input.
Jan
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH 5/6] x86/xsaves: support compact format for hvm save/restore
2015-07-02 14:02 [PATCH 0/6] add xsaves/xrstors support Shuai Ruan
` (3 preceding siblings ...)
2015-07-02 14:02 ` [PATCH 4/6] libxc: expose xsaves/xgetbv/xsavec to " Shuai Ruan
@ 2015-07-02 14:02 ` Shuai Ruan
2015-07-02 14:02 ` [PATCH 6/6] x86/xsaves: detect xsaves/xgetbv in xen Shuai Ruan
2015-07-02 14:08 ` [PATCH 0/6] add xsaves/xrstors support Andrew Cooper
6 siblings, 0 replies; 16+ messages in thread
From: Shuai Ruan @ 2015-07-02 14:02 UTC (permalink / raw)
To: xen-devel
Cc: kevin.tian, wei.liu2, Ian.Campbell, stefano.stabellini,
jun.nakajima, andrew.cooper3, ian.jackson, eddie.dong, jbeulich,
keir
xsaves/xrstors only use the compact format, so format conversion
is needed when performing save/restore.
Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
---
xen/arch/x86/hvm/hvm.c | 16 +++--
xen/arch/x86/xstate.c | 137 +++++++++++++++++++++++++++++++++++++++++++
xen/include/asm-x86/xstate.h | 6 ++
3 files changed, 154 insertions(+), 5 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2958e0d..86d1579 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2045,8 +2045,11 @@ static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
ctxt->xfeature_mask = xfeature_mask;
ctxt->xcr0 = v->arch.xcr0;
ctxt->xcr0_accum = v->arch.xcr0_accum;
- memcpy(&ctxt->save_area, v->arch.xsave_area,
- size - offsetof(struct hvm_hw_cpu_xsave, save_area));
+ if ( cpu_has_xsaves )
+ save_xsave_states(v, (u8 *)&ctxt->save_area);
+ else
+ memcpy(&ctxt->save_area, v->arch.xsave_area,
+ size - offsetof(struct hvm_hw_cpu_xsave, save_area));
}
return 0;
@@ -2145,9 +2148,12 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
v->arch.xcr0_accum = ctxt->xcr0_accum;
if ( ctxt->xcr0_accum & XSTATE_NONLAZY )
v->arch.nonlazy_xstate_used = 1;
- memcpy(v->arch.xsave_area, &ctxt->save_area,
- min(desc->length, size) - offsetof(struct hvm_hw_cpu_xsave,
- save_area));
+ if ( cpu_has_xsaves )
+ load_xsave_states(v, (u8 *)&ctxt->save_area);
+ else
+ memcpy(v->arch.xsave_area, &ctxt->save_area,
+ min(desc->length, size) - offsetof(struct hvm_hw_cpu_xsave,
+ save_area));
return 0;
}
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 73a16b9..c20f865 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -30,6 +30,9 @@ static u32 __read_mostly xsave_cntxt_size;
/* A 64-bit bitmask of the XSAVE/XRSTOR features supported by processor. */
u64 __read_mostly xfeature_mask;
+static unsigned int *xstate_offsets, *xstate_sizes;
+static unsigned int xstate_features;
+static unsigned int xstate_comp_offsets[sizeof(xfeature_mask)*8];
/* Cached xcr0 for fast read */
static DEFINE_PER_CPU(uint64_t, xcr0);
@@ -66,6 +69,137 @@ uint64_t get_xcr0(void)
return this_cpu(xcr0);
}
+static void setup_xstate_features(void)
+{
+ unsigned int eax, ebx, ecx, edx, leaf = 0x2;
+
+ xstate_features = fls(xfeature_mask);
+ xstate_offsets = _xzalloc(xstate_features, sizeof(int));
+ xstate_sizes = _xzalloc(xstate_features, sizeof(int));
+
+ do {
+ cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);
+
+ if ( eax == 0 )
+ break;
+
+ xstate_offsets[leaf] = ebx;
+ xstate_sizes[leaf] = eax;
+
+ leaf++;
+ } while (1);
+}
+
+static void setup_xstate_comp(u64 xcr0)
+{
+ unsigned int xstate_comp_sizes[sizeof(xfeature_mask)*8];
+ int i;
+
+ /*
+ * The FP xstates and SSE xstates are legacy states. They are always
+ * in the fixed offsets in the xsave area in either compacted form
+ * or standard form.
+ */
+ xstate_comp_offsets[0] = 0;
+ xstate_comp_offsets[1] = XSAVE_SSE_OFFSET;
+
+ xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;
+
+ for (i = 2; i < xstate_features; i++)
+ {
+ if ( 1 << i & xcr0 )
+ xstate_comp_sizes[i] = xstate_sizes[i];
+ else
+ xstate_comp_sizes[i] = 0;
+
+ if ( i > 2 )
+ xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
+ + xstate_comp_sizes[i-1];
+ }
+}
+
+static void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
+{
+ int feature = fls(xstate) - 1;
+ if ( !(1 << feature & xfeature_mask) )
+ return NULL;
+
+ return (void *)xsave + xstate_comp_offsets[feature];
+}
+
+void save_xsave_states(struct vcpu *v, u8 *dest)
+{
+ struct xsave_struct *xsave = v->arch.xsave_area;
+ u64 xstate_bv = xsave->xsave_hdr.xstate_bv;
+ u64 valid;
+
+ setup_xstate_comp(v->arch.xcr0);
+ /*
+ * Copy legacy XSAVE area, to avoid complications with CPUID
+ * leaves 0 and 1 in the loop below.
+ */
+ memcpy(dest, xsave, XSAVE_HDR_OFFSET);
+
+ /* Set XSTATE_BV */
+ *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
+
+ /*
+ * Copy each region from the possibly compacted offset to the
+ * non-compacted offset.
+ */
+ valid = xstate_bv & ~XSTATE_FP_SSE;
+ while ( valid )
+ {
+ u64 feature = valid & -valid;
+ int index = fls(feature) - 1;
+ void *src = get_xsave_addr(xsave, feature);
+
+ if ( src )
+ memcpy(dest + xstate_offsets[index], src, xstate_sizes[index]);
+ else
+ WARN_ON(1);
+
+ valid -= feature;
+ }
+}
+
+void load_xsave_states(struct vcpu *v, u8 *src)
+{
+ struct xsave_struct *xsave = v->arch.xsave_area;
+ u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
+ u64 valid;
+
+ setup_xstate_comp(v->arch.xcr0);
+ /*
+ * Copy legacy XSAVE area, to avoid complications with CPUID
+ * leaves 0 and 1 in the loop below.
+ */
+ memcpy(xsave, src, XSAVE_HDR_OFFSET);
+
+ /* Set XSTATE_BV and possibly XCOMP_BV. */
+ xsave->xsave_hdr.xstate_bv = xstate_bv;
+ xsave->xsave_hdr.xcomp_bv = get_xcr0() | XSTATE_COMPACTION_ENABLED;
+
+ /*
+ * Copy each region from the non-compacted offset to the
+ * possibly compacted offset.
+ */
+ valid = xstate_bv & ~XSTATE_FP_SSE;
+ while ( valid )
+ {
+ u64 feature = valid & -valid;
+ int index = fls(feature) - 1;
+ void *dest = get_xsave_addr(xsave, feature);
+
+ if (dest)
+ memcpy(dest, src + xstate_offsets[index], xstate_sizes[index]);
+ else
+ WARN_ON(1);
+
+ valid -= feature;
+ }
+}
+
void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr)
{
asm volatile ( ".byte 0x48,0x0f,0xc7,0x2f"
@@ -401,6 +535,9 @@ void xstate_init(bool_t bsp)
/* XXX BUG_ON(!cpu_has_xgetbv1 != !(eax & XSTATE_FEATURE_XGETBV1)); */
/* XXX BUG_ON(!cpu_has_xsaves != !(eax & XSTATE_FEATURE_XSAVES)); */
}
+
+ if ( cpu_has_xsaves )
+ setup_xstate_features();
}
static bool_t valid_xcr0(u64 xcr0)
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index 1357063..38129a1 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -22,7 +22,11 @@
#define XCR_XFEATURE_ENABLED_MASK 0x00000000 /* index of XCR0 */
+#define XSAVE_HDR_SIZE 64
+#define XSAVE_SSE_OFFSET 160
#define XSTATE_YMM_SIZE 256
+#define FXSAVE_SIZE 512
+#define XSAVE_HDR_OFFSET FXSAVE_SIZE
#define XSTATE_AREA_MIN_SIZE (512 + 64) /* FP/SSE + XSAVE.HEADER */
#define XSTATE_FP (1ULL << 0)
@@ -86,6 +90,8 @@ bool_t __must_check set_xcr0(u64 xfeatures);
uint64_t get_xcr0(void);
void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr);
void xrstors(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr);
+void save_xsave_states(struct vcpu *v, u8 *dest);
+void load_xsave_states(struct vcpu *v, u8 *src);
void xsave(struct vcpu *v, uint64_t mask);
void xrstor(struct vcpu *v, uint64_t mask);
bool_t xsave_enabled(const struct vcpu *v);
--
1.9.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH 6/6] x86/xsaves: detect xsaves/xgetbv in xen
2015-07-02 14:02 [PATCH 0/6] add xsaves/xrstors support Shuai Ruan
` (4 preceding siblings ...)
2015-07-02 14:02 ` [PATCH 5/6] x86/xsaves: support compact format for hvm save/restore Shuai Ruan
@ 2015-07-02 14:02 ` Shuai Ruan
2015-07-02 14:08 ` [PATCH 0/6] add xsaves/xrstors support Andrew Cooper
6 siblings, 0 replies; 16+ messages in thread
From: Shuai Ruan @ 2015-07-02 14:02 UTC (permalink / raw)
To: xen-devel
Cc: kevin.tian, wei.liu2, Ian.Campbell, stefano.stabellini,
jun.nakajima, andrew.cooper3, ian.jackson, eddie.dong, jbeulich,
keir
As xsaves/xgetbv are already supported, switch them on.
Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
---
xen/arch/x86/xstate.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index c20f865..ebf9920 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -525,15 +525,15 @@ void xstate_init(bool_t bsp)
{
cpu_has_xsaveopt = !!(eax & XSTATE_FEATURE_XSAVEOPT);
cpu_has_xsavec = !!(eax & XSTATE_FEATURE_XSAVEC);
- /* XXX cpu_has_xgetbv1 = !!(eax & XSTATE_FEATURE_XGETBV1); */
- /* XXX cpu_has_xsaves = !!(eax & XSTATE_FEATURE_XSAVES); */
+ cpu_has_xgetbv1 = !!(eax & XSTATE_FEATURE_XGETBV1);
+ cpu_has_xsaves = !!(eax & XSTATE_FEATURE_XSAVES);
}
else
{
BUG_ON(!cpu_has_xsaveopt != !(eax & XSTATE_FEATURE_XSAVEOPT));
BUG_ON(!cpu_has_xsavec != !(eax & XSTATE_FEATURE_XSAVEC));
- /* XXX BUG_ON(!cpu_has_xgetbv1 != !(eax & XSTATE_FEATURE_XGETBV1)); */
- /* XXX BUG_ON(!cpu_has_xsaves != !(eax & XSTATE_FEATURE_XSAVES)); */
+ BUG_ON(!cpu_has_xgetbv1 != !(eax & XSTATE_FEATURE_XGETBV1));
+ BUG_ON(!cpu_has_xsaves != !(eax & XSTATE_FEATURE_XSAVES));
}
if ( cpu_has_xsaves )
--
1.9.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* Re: [PATCH 0/6] add xsaves/xrstors support
2015-07-02 14:02 [PATCH 0/6] add xsaves/xrstors support Shuai Ruan
` (5 preceding siblings ...)
2015-07-02 14:02 ` [PATCH 6/6] x86/xsaves: detect xsaves/xgetbv in xen Shuai Ruan
@ 2015-07-02 14:08 ` Andrew Cooper
2015-07-07 1:46 ` Ruan, Shuai
6 siblings, 1 reply; 16+ messages in thread
From: Andrew Cooper @ 2015-07-02 14:08 UTC (permalink / raw)
To: Shuai Ruan, xen-devel
Cc: kevin.tian, wei.liu2, Ian.Campbell, stefano.stabellini,
eddie.dong, ian.jackson, jbeulich, jun.nakajima, keir
On 02/07/15 15:02, Shuai Ruan wrote:
> This patchset enable xsaves/xrstors feature.
> It includes tree parts:
> 1. add xsaves/xrstors for xen.
> 2. add xsaves/xrstors for pv guest.
> 3. add xsaves/xrstors for hvn guest.
What is xsaves/xrstors and why might I want Xen to use it? What
advantages does it give? When might these instructions be available?
Where can I read more details about this?
~Andrew
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 0/6] add xsaves/xrstors support
2015-07-02 14:08 ` [PATCH 0/6] add xsaves/xrstors support Andrew Cooper
@ 2015-07-07 1:46 ` Ruan, Shuai
2015-07-07 9:26 ` Andrew Cooper
0 siblings, 1 reply; 16+ messages in thread
From: Ruan, Shuai @ 2015-07-07 1:46 UTC (permalink / raw)
To: Andrew Cooper, xen-devel
Cc: Tian, Kevin, wei.liu2, Ian.Campbell, stefano.stabellini, Dong,
Eddie, ian.jackson, jbeulich, Nakajima, Jun, keir
OK, I will add these. Thanks.
-----Original Message-----
From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
Sent: Thursday, July 2, 2015 10:09 PM
To: Ruan, Shuai; xen-devel@lists.xen.org
Cc: ian.jackson@eu.citrix.com; Ian.Campbell@citrix.com; stefano.stabellini@eu.citrix.com; wei.liu2@citrix.com; jbeulich@suse.com; Nakajima, Jun; keir@xen.org; Dong, Eddie; Tian, Kevin
Subject: Re: [PATCH 0/6] add xsaves/xrstors support
On 02/07/15 15:02, Shuai Ruan wrote:
> This patchset enable xsaves/xrstors feature.
> It includes tree parts:
> 1. add xsaves/xrstors for xen.
> 2. add xsaves/xrstors for pv guest.
> 3. add xsaves/xrstors for hvn guest.
What is xsaves/xrstors and why might I want Xen to use it? What advantages does it give? When might these instructions be available?
Where can I read more details about this?
~Andrew
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 0/6] add xsaves/xrstors support
2015-07-07 1:46 ` Ruan, Shuai
@ 2015-07-07 9:26 ` Andrew Cooper
0 siblings, 0 replies; 16+ messages in thread
From: Andrew Cooper @ 2015-07-07 9:26 UTC (permalink / raw)
To: Ruan, Shuai, xen-devel
Cc: Tian, Kevin, wei.liu2, Ian.Campbell, stefano.stabellini, Dong,
Eddie, ian.jackson, jbeulich, Nakajima, Jun, keir
On 07/07/15 02:46, Ruan, Shuai wrote:
> OK,I will add thest.Thanks.
And while you are at it, you need to perform some migration testing to
cover compatibility cases. Doing this will show why we currently do not
use compressed xsave layouts.
~Andrew
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 16+ messages in thread
* [PATCH 1/6] x86/xsaves: enable xsaves/xrstors for pv guest
2015-07-17 7:26 [PATCH v2 " Shuai Ruan
@ 2015-07-17 7:26 ` Shuai Ruan
2015-07-17 16:21 ` Konrad Rzeszutek Wilk
0 siblings, 1 reply; 16+ messages in thread
From: Shuai Ruan @ 2015-07-17 7:26 UTC (permalink / raw)
To: xen-devel
Cc: kevin.tian, wei.liu2, Ian.Campbell, stefano.stabellini,
jun.nakajima, andrew.cooper3, ian.jackson, eddie.dong, jbeulich,
keir
This patch emulates xsaves/xrstors instructions and
XSS msr access.
As xsaves/xrstors instructions and XSS msr access are
required to be executed only in ring0, emulation is
needed when a pv guest uses these instructions.
Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
---
xen/arch/x86/domain.c | 3 ++
xen/arch/x86/traps.c | 87 +++++++++++++++++++++++++++++++++++++++++
xen/arch/x86/x86_64/mm.c | 52 ++++++++++++++++++++++++
xen/arch/x86/xstate.c | 39 ++++++++++++++++++
xen/include/asm-x86/domain.h | 1 +
xen/include/asm-x86/mm.h | 1 +
xen/include/asm-x86/msr-index.h | 2 +
xen/include/asm-x86/xstate.h | 3 ++
8 files changed, 188 insertions(+)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index a8fe046..66f8231 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -426,6 +426,7 @@ int vcpu_initialise(struct vcpu *v)
/* By default, do not emulate */
v->arch.vm_event.emulate_flags = 0;
+ v->arch.msr_ia32_xss = 0;
rc = mapcache_vcpu_init(v);
if ( rc )
@@ -1494,6 +1495,8 @@ static void __context_switch(void)
if ( xcr0 != get_xcr0() && !set_xcr0(xcr0) )
BUG();
}
+ if ( cpu_has_xsaves )
+ wrmsr_safe(MSR_IA32_XSS, n->arch.msr_ia32_xss);
vcpu_restore_fpu_eager(n);
n->arch.ctxt_switch_to(n);
}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index ac62f20..5f79f07 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2346,6 +2346,82 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
}
break;
+ case 0xc7:
+ {
+ void *xsave_addr;
+ int not_page_aligned = 0;
+ u32 guest_xsaves_size = xstate_ctxt_size_compact(v->arch.xcr0);
+
+ switch ( insn_fetch(u8, code_base, eip, code_limit) )
+ {
+ case 0x2f:/* XSAVES */
+ {
+ if ( (regs->edi & ~PAGE_MASK) + guest_xsaves_size > PAGE_SIZE )
+ {
+ mfn_t mfn_list[2];
+ void *va;
+
+ not_page_aligned = 1;
+ mfn_list[0] = _mfn(do_page_walk_mfn(v, regs->edi));
+ mfn_list[1] = _mfn(do_page_walk_mfn(v,
+ PAGE_ALIGN(regs->edi)));
+
+ va = __vmap(mfn_list, 1, 2, PAGE_SIZE, PAGE_HYPERVISOR);
+ ASSERT(((unsigned long) va & ~PAGE_MASK) == 0);
+ xsave_addr = (void *)((unsigned long)va +
+ (regs->edi & ~PAGE_MASK));
+ }
+ else
+ xsave_addr = do_page_walk(v, regs->edi);
+
+ if ( !xsave_addr )
+ goto fail;
+
+ xsaves(regs->eax, regs->edx, xsave_addr);
+
+ if ( not_page_aligned )
+ vunmap((void *)((unsigned long)xsave_addr & PAGE_MASK));
+ else
+ unmap_domain_page(xsave_addr);
+ break;
+ }
+ case 0x1f:/* XRSTORS */
+ {
+ if( (regs->edi & ~PAGE_MASK) + guest_xsaves_size > PAGE_SIZE )
+ {
+ mfn_t mfn_list[2];
+ void *va;
+
+ not_page_aligned = 1;
+ mfn_list[0] = _mfn(do_page_walk_mfn(v, regs->edi));
+ mfn_list[1] = _mfn(do_page_walk_mfn(v,
+ PAGE_ALIGN(regs->edi)));
+
+ va = __vmap(mfn_list, 1, 2, PAGE_SIZE, PAGE_HYPERVISOR);
+ ASSERT(((unsigned long) va & ~PAGE_MASK) == 0);
+ xsave_addr = (void *)((unsigned long)va +
+ (regs->edi & ~PAGE_MASK));
+ }
+ else
+ xsave_addr = do_page_walk(v, regs->edi);
+
+ if ( !xsave_addr )
+ goto fail;
+
+ xrstors(regs->eax, regs->edx, xsave_addr);
+
+ if ( not_page_aligned )
+ vunmap((void *)((unsigned long)xsave_addr & PAGE_MASK));
+ else
+ unmap_domain_page(xsave_addr);
+ break;
+ }
+ default:
+ goto fail;
+ }
+ break;
+ }
+
case 0x06: /* CLTS */
(void)do_fpu_taskswitch(0);
break;
@@ -2638,6 +2714,12 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
wrmsrl(regs->_ecx, msr_content);
break;
+ case MSR_IA32_XSS:
+ if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
+ goto fail;
+ v->arch.msr_ia32_xss = msr_content;
+ break;
+
default:
if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
break;
@@ -2740,6 +2822,11 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
regs->edx = 0;
break;
+ case MSR_IA32_XSS:
+ regs->eax = v->arch.msr_ia32_xss;
+ regs->edx = v->arch.msr_ia32_xss >> 32;
+ break;
+
default:
if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
goto rdmsr_writeback;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 3ef4618..f64aa08 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -48,6 +48,58 @@ l2_pgentry_t __section(".bss.page_aligned") l2_bootmap[L2_PAGETABLE_ENTRIES];
l2_pgentry_t *compat_idle_pg_table_l2;
+unsigned long do_page_walk_mfn(struct vcpu *v, unsigned long addr)
+{
+ unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
+ l4_pgentry_t l4e, *l4t;
+ l3_pgentry_t l3e, *l3t;
+ l2_pgentry_t l2e, *l2t;
+ l1_pgentry_t l1e, *l1t;
+
+ if ( !is_pv_vcpu(v) || !is_canonical_address(addr) )
+ return 0;
+
+ l4t = map_domain_page(mfn);
+ l4e = l4t[l4_table_offset(addr)];
+ unmap_domain_page(l4t);
+ if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
+ return 0;
+
+ l3t = map_l3t_from_l4e(l4e);
+ l3e = l3t[l3_table_offset(addr)];
+ unmap_domain_page(l3t);
+ mfn = l3e_get_pfn(l3e);
+ if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
+ return 0;
+ if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
+ {
+ mfn += PFN_DOWN(addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));
+ goto ret;
+ }
+
+ l2t = map_domain_page(mfn);
+ l2e = l2t[l2_table_offset(addr)];
+ unmap_domain_page(l2t);
+ mfn = l2e_get_pfn(l2e);
+ if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
+ return 0;
+ if ( (l2e_get_flags(l2e) & _PAGE_PSE) )
+ {
+ mfn += PFN_DOWN(addr & ((1UL << L2_PAGETABLE_SHIFT) - 1));
+ goto ret;
+ }
+
+ l1t = map_domain_page(mfn);
+ l1e = l1t[l1_table_offset(addr)];
+ unmap_domain_page(l1t);
+ mfn = l1e_get_pfn(l1e);
+ if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
+ return 0;
+
+ ret:
+ return mfn;
+}
+
void *do_page_walk(struct vcpu *v, unsigned long addr)
{
unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index d5f5e3b..e34eda3 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -65,6 +65,31 @@ uint64_t get_xcr0(void)
return this_cpu(xcr0);
}
+void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr)
+{
+ asm volatile ( ".byte 0x48,0x0f,0xc7,0x2f"
+ : "=m" (*ptr)
+ : "a" (lmask), "d" (hmask), "D" (ptr) );
+}
+
+void xrstors(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr)
+{
+ asm volatile ( "1: .byte 0x48,0x0f,0xc7,0x1f\n"
+ ".section .fixup,\"ax\" \n"
+ "2: mov %5,%%ecx \n"
+ " xor %1,%1 \n"
+ " rep stosb \n"
+ " lea %2,%0 \n"
+ " mov %3,%1 \n"
+ " jmp 1b \n"
+ ".previous \n"
+ _ASM_EXTABLE(1b, 2b)
+ : "+&D" (ptr), "+&a" (lmask)
+ : "m" (*ptr), "g" (lmask), "d" (hmask),
+ "m" (xsave_cntxt_size)
+ : "ecx" );
+}
+
void xsave(struct vcpu *v, uint64_t mask)
{
struct xsave_struct *ptr = v->arch.xsave_area;
@@ -268,6 +293,20 @@ static unsigned int _xstate_ctxt_size(u64 xcr0)
return ebx;
}
+unsigned int xstate_ctxt_size_compact(u64 xcr0)
+{
+ u64 act_xcr0 = get_xcr0();
+ u32 eax, ebx = 0, ecx, edx;
+ bool_t ok = set_xcr0(xcr0);
+
+ ASSERT(ok);
+ cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
+ ok = set_xcr0(act_xcr0);
+ ASSERT(ok);
+
+ return ebx;
+}
+
/* Fastpath for common xstate size requests, avoiding reloads of xcr0. */
unsigned int xstate_ctxt_size(u64 xcr0)
{
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 96bde65..bcea9d4 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -473,6 +473,7 @@ struct arch_vcpu
*/
struct xsave_struct *xsave_area;
uint64_t xcr0;
+ u64 msr_ia32_xss;
/* Accumulated eXtended features mask for using XSAVE/XRESTORE by Xen
* itself, as we can never know whether guest OS depends on content
* preservation whenever guest OS clears one feature flag (for example,
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 8595c38..94a590e 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -524,6 +524,7 @@ void make_cr3(struct vcpu *v, unsigned long mfn);
void update_cr3(struct vcpu *v);
int vcpu_destroy_pagetables(struct vcpu *);
struct trap_bounce *propagate_page_fault(unsigned long addr, u16 error_code);
+unsigned long do_page_walk_mfn(struct vcpu *v, unsigned long addr);
void *do_page_walk(struct vcpu *v, unsigned long addr);
int __sync_local_execstate(void);
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 83f2f70..9564113 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -58,6 +58,8 @@
#define MSR_IA32_BNDCFGS 0x00000D90
+#define MSR_IA32_XSS 0x00000da0
+
#define MSR_MTRRfix64K_00000 0x00000250
#define MSR_MTRRfix16K_80000 0x00000258
#define MSR_MTRRfix16K_A0000 0x00000259
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index 4c690db..59c7156 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -82,6 +82,8 @@ struct __packed __attribute__((aligned (64))) xsave_struct
/* extended state operations */
bool_t __must_check set_xcr0(u64 xfeatures);
uint64_t get_xcr0(void);
+void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr);
+void xrstors(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr);
void xsave(struct vcpu *v, uint64_t mask);
void xrstor(struct vcpu *v, uint64_t mask);
bool_t xsave_enabled(const struct vcpu *v);
@@ -92,6 +94,7 @@ int __must_check handle_xsetbv(u32 index, u64 new_bv);
void xstate_free_save_area(struct vcpu *v);
int xstate_alloc_save_area(struct vcpu *v);
void xstate_init(bool_t bsp);
+unsigned int xstate_ctxt_size_compact(u64 xcr0);
unsigned int xstate_ctxt_size(u64 xcr0);
#endif /* __ASM_XSTATE_H */
--
1.9.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* Re: [PATCH 1/6] x86/xsaves: enable xsaves/xrstors for pv guest
2015-07-17 7:26 ` [PATCH 1/6] x86/xsaves: enable xsaves/xrstors for pv guest Shuai Ruan
@ 2015-07-17 16:21 ` Konrad Rzeszutek Wilk
2015-07-21 9:43 ` Ruan, Shuai
0 siblings, 1 reply; 16+ messages in thread
From: Konrad Rzeszutek Wilk @ 2015-07-17 16:21 UTC (permalink / raw)
To: Shuai Ruan
Cc: kevin.tian, wei.liu2, Ian.Campbell, stefano.stabellini,
andrew.cooper3, ian.jackson, xen-devel, jbeulich, eddie.dong,
jun.nakajima, keir
On Fri, Jul 17, 2015 at 03:26:51PM +0800, Shuai Ruan wrote:
> This patch emualtes xsaves/xrstors instructions and
emulates
> XSS msr access.
>
> As xsaves/xrstors instructions and XSS msr access
> required be executed only in ring0. So emulation are
> needed when pv guest uses these instructions.
This looks to try the emulation even if the hardware does not support it.
That is - and guest could try these opcodes and we would end up
trying to execute the xsaves in the hypervisor.
Perhaps first we should verify that the host can actually execute this?
>
> Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
> ---
> xen/arch/x86/domain.c | 3 ++
> xen/arch/x86/traps.c | 87 +++++++++++++++++++++++++++++++++++++++++
> xen/arch/x86/x86_64/mm.c | 52 ++++++++++++++++++++++++
> xen/arch/x86/xstate.c | 39 ++++++++++++++++++
> xen/include/asm-x86/domain.h | 1 +
> xen/include/asm-x86/mm.h | 1 +
> xen/include/asm-x86/msr-index.h | 2 +
> xen/include/asm-x86/xstate.h | 3 ++
> 8 files changed, 188 insertions(+)
>
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index a8fe046..66f8231 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -426,6 +426,7 @@ int vcpu_initialise(struct vcpu *v)
>
> /* By default, do not emulate */
> v->arch.vm_event.emulate_flags = 0;
> + v->arch.msr_ia32_xss = 0;
>
> rc = mapcache_vcpu_init(v);
> if ( rc )
> @@ -1494,6 +1495,8 @@ static void __context_switch(void)
> if ( xcr0 != get_xcr0() && !set_xcr0(xcr0) )
> BUG();
> }
> + if ( cpu_has_xsaves )
> + wrmsr_safe(MSR_IA32_XSS, n->arch.msr_ia32_xss);
> vcpu_restore_fpu_eager(n);
> n->arch.ctxt_switch_to(n);
> }
> diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
> index ac62f20..5f79f07 100644
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -2346,6 +2346,82 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
> }
> break;
>
> + case 0xc7:
> + {
> + void *xsave_addr;
> + int not_page_aligned = 0;
> + u32 guest_xsaves_size = xstate_ctxt_size_compact(v->arch.xcr0);
> +
> + switch ( insn_fetch(u8, code_base, eip, code_limit) )
> + {
> + case 0x2f:/* XSAVES */
> + {
> + if ( (regs->edi & ~PAGE_MASK) + guest_xsaves_size > PAGE_SIZE )
> + {
> + mfn_t mfn_list[2];
> + void *va;
> +
> + not_page_aligned = 1;
> + mfn_list[0] = _mfn(do_page_walk_mfn(v, regs->edi));
> + mfn_list[1] = _mfn(do_page_walk_mfn(v,
> + PAGE_ALIGN(regs->edi)));
> +
> + va = __vmap(mfn_list, 1, 2, PAGE_SIZE, PAGE_HYPERVISOR);
> + ASSERT(((unsigned long) va & ~PAGE_MASK) == 0);
> + xsave_addr = (void *)((unsigned long)va +
> + (regs->edi & ~PAGE_MASK));
> + }
> + else
> + xsave_addr = do_page_walk(v, regs->edi);
> +
> + if ( !xsave_addr )
> + goto fail;
> +
> + xsaves(regs->eax, regs->edx, xsave_addr);
> +
> + if ( not_page_aligned )
> + vunmap((void *)((unsigned long)xsave_addr & PAGE_MASK));
> + else
> + unmap_domain_page(xsave_addr);
> + break;
> + }
> + case 0x1f:/* XRSTORS */
> + {
> + if( (regs->edi & ~PAGE_MASK) + guest_xsaves_size > PAGE_SIZE )
> + {
> + mfn_t mfn_list[2];
> + void *va;
> +
> + not_page_aligned = 1;
> + mfn_list[0] = _mfn(do_page_walk_mfn(v, regs->edi));
> + mfn_list[1] = _mfn(do_page_walk_mfn(v,
> + PAGE_ALIGN(regs->edi)));
> +
> + va = __vmap(mfn_list, 1, 2, PAGE_SIZE, PAGE_HYPERVISOR);
> + ASSERT(((unsigned long) va & ~PAGE_MASK) == 0);
Ouch! Crash the hypervisor?
> + xsave_addr = (void *)((unsigned long)va +
> + (regs->edi & ~PAGE_MASK));
> + }
> + else
> + xsave_addr = do_page_walk(v, regs->edi);
> +
> + if ( !xsave_addr )
> + goto fail;
> +
> + xrstors(regs->eax, regs->edx, xsave_addr);
> +
> + if ( not_page_aligned )
> + vunmap((void *)((unsigned long)xsave_addr & PAGE_MASK));
> + else
> + unmap_domain_page(xsave_addr);
> + break;
> + }
> + default:
> + goto fail;
> + }
> + break;
> + }
> +
> case 0x06: /* CLTS */
> (void)do_fpu_taskswitch(0);
> break;
> @@ -2638,6 +2714,12 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
> wrmsrl(regs->_ecx, msr_content);
> break;
>
> + case MSR_IA32_XSS:
> + if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
> + goto fail;
> + v->arch.msr_ia32_xss = msr_content;
> + break;
> +
> default:
> if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
> break;
> @@ -2740,6 +2822,11 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
> regs->edx = 0;
> break;
>
> + case MSR_IA32_XSS:
> + regs->eax = v->arch.msr_ia32_xss;
> + regs->edx = v->arch.msr_ia32_xss >> 32;
> + break;
> +
> default:
> if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
> goto rdmsr_writeback;
> diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
> index 3ef4618..f64aa08 100644
> --- a/xen/arch/x86/x86_64/mm.c
> +++ b/xen/arch/x86/x86_64/mm.c
> @@ -48,6 +48,58 @@ l2_pgentry_t __section(".bss.page_aligned") l2_bootmap[L2_PAGETABLE_ENTRIES];
>
> l2_pgentry_t *compat_idle_pg_table_l2;
>
> +unsigned long do_page_walk_mfn(struct vcpu *v, unsigned long addr)
> +{
> + unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
> + l4_pgentry_t l4e, *l4t;
> + l3_pgentry_t l3e, *l3t;
> + l2_pgentry_t l2e, *l2t;
> + l1_pgentry_t l1e, *l1t;
> +
> + if ( !is_pv_vcpu(v) || !is_canonical_address(addr) )
> + return 0;
> +
> + l4t = map_domain_page(mfn);
> + l4e = l4t[l4_table_offset(addr)];
> + unmap_domain_page(l4t);
> + if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
> + return 0;
> +
> + l3t = map_l3t_from_l4e(l4e);
> + l3e = l3t[l3_table_offset(addr)];
> + unmap_domain_page(l3t);
> + mfn = l3e_get_pfn(l3e);
> + if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> + return 0;
> + if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
> + {
> + mfn += PFN_DOWN(addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));
> + goto ret;
> + }
> +
> + l2t = map_domain_page(mfn);
> + l2e = l2t[l2_table_offset(addr)];
> + unmap_domain_page(l2t);
> + mfn = l2e_get_pfn(l2e);
> + if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> + return 0;
> + if ( (l2e_get_flags(l2e) & _PAGE_PSE) )
> + {
> + mfn += PFN_DOWN(addr & ((1UL << L2_PAGETABLE_SHIFT) - 1));
> + goto ret;
> + }
> +
> + l1t = map_domain_page(mfn);
> + l1e = l1t[l1_table_offset(addr)];
> + unmap_domain_page(l1t);
> + mfn = l1e_get_pfn(l1e);
> + if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> + return 0;
> +
> + ret:
> + return mfn;
> +}
> +
> void *do_page_walk(struct vcpu *v, unsigned long addr)
> {
> unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
> diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
> index d5f5e3b..e34eda3 100644
> --- a/xen/arch/x86/xstate.c
> +++ b/xen/arch/x86/xstate.c
> @@ -65,6 +65,31 @@ uint64_t get_xcr0(void)
> return this_cpu(xcr0);
> }
>
> +void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr)
> +{
> + asm volatile ( ".byte 0x48,0x0f,0xc7,0x2f"
> + : "=m" (*ptr)
> + : "a" (lmask), "d" (hmask), "D" (ptr) );
> +}
> +
> +void xrstors(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr)
> +{
> + asm volatile ( "1: .byte 0x48,0x0f,0xc7,0x1f\n"
> + ".section .fixup,\"ax\" \n"
> + "2: mov %5,%%ecx \n"
> + " xor %1,%1 \n"
> + " rep stosb \n"
> + " lea %2,%0 \n"
> + " mov %3,%1 \n"
> + " jmp 1b \n"
> + ".previous \n"
> + _ASM_EXTABLE(1b, 2b)
> + : "+&D" (ptr), "+&a" (lmask)
> + : "m" (*ptr), "g" (lmask), "d" (hmask),
> + "m" (xsave_cntxt_size)
> + : "ecx" );
> +}
> +
> void xsave(struct vcpu *v, uint64_t mask)
> {
> struct xsave_struct *ptr = v->arch.xsave_area;
> @@ -268,6 +293,20 @@ static unsigned int _xstate_ctxt_size(u64 xcr0)
> return ebx;
> }
>
> +unsigned int xstate_ctxt_size_compact(u64 xcr0)
> +{
> + u64 act_xcr0 = get_xcr0();
> + u32 eax, ebx = 0, ecx, edx;
> + bool_t ok = set_xcr0(xcr0);
> +
> + ASSERT(ok);
> + cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
> + ok = set_xcr0(act_xcr0);
> + ASSERT(ok);
> +
> + return ebx;
> +}
> +
> /* Fastpath for common xstate size requests, avoiding reloads of xcr0. */
> unsigned int xstate_ctxt_size(u64 xcr0)
> {
> diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
> index 96bde65..bcea9d4 100644
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -473,6 +473,7 @@ struct arch_vcpu
> */
> struct xsave_struct *xsave_area;
> uint64_t xcr0;
> + u64 msr_ia32_xss;
> /* Accumulated eXtended features mask for using XSAVE/XRESTORE by Xen
> * itself, as we can never know whether guest OS depends on content
> * preservation whenever guest OS clears one feature flag (for example,
> diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
> index 8595c38..94a590e 100644
> --- a/xen/include/asm-x86/mm.h
> +++ b/xen/include/asm-x86/mm.h
> @@ -524,6 +524,7 @@ void make_cr3(struct vcpu *v, unsigned long mfn);
> void update_cr3(struct vcpu *v);
> int vcpu_destroy_pagetables(struct vcpu *);
> struct trap_bounce *propagate_page_fault(unsigned long addr, u16 error_code);
> +unsigned long do_page_walk_mfn(struct vcpu *v, unsigned long addr);
> void *do_page_walk(struct vcpu *v, unsigned long addr);
>
> int __sync_local_execstate(void);
> diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
> index 83f2f70..9564113 100644
> --- a/xen/include/asm-x86/msr-index.h
> +++ b/xen/include/asm-x86/msr-index.h
> @@ -58,6 +58,8 @@
>
> #define MSR_IA32_BNDCFGS 0x00000D90
>
> +#define MSR_IA32_XSS 0x00000da0
> +
> #define MSR_MTRRfix64K_00000 0x00000250
> #define MSR_MTRRfix16K_80000 0x00000258
> #define MSR_MTRRfix16K_A0000 0x00000259
> diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
> index 4c690db..59c7156 100644
> --- a/xen/include/asm-x86/xstate.h
> +++ b/xen/include/asm-x86/xstate.h
> @@ -82,6 +82,8 @@ struct __packed __attribute__((aligned (64))) xsave_struct
> /* extended state operations */
> bool_t __must_check set_xcr0(u64 xfeatures);
> uint64_t get_xcr0(void);
> +void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr);
> +void xrstors(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr);
> void xsave(struct vcpu *v, uint64_t mask);
> void xrstor(struct vcpu *v, uint64_t mask);
> bool_t xsave_enabled(const struct vcpu *v);
> @@ -92,6 +94,7 @@ int __must_check handle_xsetbv(u32 index, u64 new_bv);
> void xstate_free_save_area(struct vcpu *v);
> int xstate_alloc_save_area(struct vcpu *v);
> void xstate_init(bool_t bsp);
> +unsigned int xstate_ctxt_size_compact(u64 xcr0);
> unsigned int xstate_ctxt_size(u64 xcr0);
>
> #endif /* __ASM_XSTATE_H */
> --
> 1.9.1
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 1/6] x86/xsaves: enable xsaves/xrstors for pv guest
2015-07-17 16:21 ` Konrad Rzeszutek Wilk
@ 2015-07-21 9:43 ` Ruan, Shuai
2015-07-21 13:12 ` Konrad Rzeszutek Wilk
0 siblings, 1 reply; 16+ messages in thread
From: Ruan, Shuai @ 2015-07-21 9:43 UTC (permalink / raw)
To: Konrad Rzeszutek Wilk
Cc: Tian, Kevin, wei.liu2, Ian.Campbell, stefano.stabellini,
andrew.cooper3, ian.jackson, xen-devel, jbeulich, Dong, Eddie,
Nakajima, Jun, keir
Thanks for your review, Konrad.
1. If the hardware does not support xsaves, then the hypervisor will not expose the xsaves feature to the guest. Then the guest will not execute xsaves. But your suggestion is important; I will add code to verify whether the host can execute xsaves or not.
2. Using 'ASSERT' here is improper. So I will fix these in the next version.
Thanks
-----Original Message-----
From: Konrad Rzeszutek Wilk [mailto:konrad.wilk@oracle.com]
Sent: Saturday, July 18, 2015 12:22 AM
To: Ruan, Shuai
Cc: xen-devel@lists.xen.org; Tian, Kevin; wei.liu2@citrix.com; Ian.Campbell@citrix.com; stefano.stabellini@eu.citrix.com; Nakajima, Jun; andrew.cooper3@citrix.com; ian.jackson@eu.citrix.com; Dong, Eddie; jbeulich@suse.com; keir@xen.org
Subject: Re: [Xen-devel] [PATCH 1/6] x86/xsaves: enable xsaves/xrstors for pv guest
On Fri, Jul 17, 2015 at 03:26:51PM +0800, Shuai Ruan wrote:
> This patch emualtes xsaves/xrstors instructions and
emulates
> XSS msr access.
>
> As xsaves/xrstors instructions and XSS msr access required be executed
> only in ring0. So emulation are needed when pv guest uses these
> instructions.
This looks to try the emulation even if the hardware does not support it.
That is - and guest could try these opcodes and we would end up trying to execute the xsaves in the hypervisor.
Perhaps first we should verify that the host can actually execute this?
>
> Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
> ---
> xen/arch/x86/domain.c | 3 ++
> xen/arch/x86/traps.c | 87 +++++++++++++++++++++++++++++++++++++++++
> xen/arch/x86/x86_64/mm.c | 52 ++++++++++++++++++++++++
> xen/arch/x86/xstate.c | 39 ++++++++++++++++++
> xen/include/asm-x86/domain.h | 1 +
> xen/include/asm-x86/mm.h | 1 +
> xen/include/asm-x86/msr-index.h | 2 +
> xen/include/asm-x86/xstate.h | 3 ++
> 8 files changed, 188 insertions(+)
>
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index
> a8fe046..66f8231 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -426,6 +426,7 @@ int vcpu_initialise(struct vcpu *v)
>
> /* By default, do not emulate */
> v->arch.vm_event.emulate_flags = 0;
> + v->arch.msr_ia32_xss = 0;
>
> rc = mapcache_vcpu_init(v);
> if ( rc )
> @@ -1494,6 +1495,8 @@ static void __context_switch(void)
> if ( xcr0 != get_xcr0() && !set_xcr0(xcr0) )
> BUG();
> }
> + if ( cpu_has_xsaves )
> + wrmsr_safe(MSR_IA32_XSS, n->arch.msr_ia32_xss);
> vcpu_restore_fpu_eager(n);
> n->arch.ctxt_switch_to(n);
> }
> diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index
> ac62f20..5f79f07 100644
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -2346,6 +2346,82 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
> }
> break;
>
> + case 0xc7:
> + {
> + void *xsave_addr;
> + int not_page_aligned = 0;
> + u32 guest_xsaves_size =
> + xstate_ctxt_size_compact(v->arch.xcr0);
> +
> + switch ( insn_fetch(u8, code_base, eip, code_limit) )
> + {
> + case 0x2f:/* XSAVES */
> + {
> + if ( (regs->edi & ~PAGE_MASK) + guest_xsaves_size > PAGE_SIZE )
> + {
> + mfn_t mfn_list[2];
> + void *va;
> +
> + not_page_aligned = 1;
> + mfn_list[0] = _mfn(do_page_walk_mfn(v, regs->edi));
> + mfn_list[1] = _mfn(do_page_walk_mfn(v,
> + PAGE_ALIGN(regs->edi)));
> +
> + va = __vmap(mfn_list, 1, 2, PAGE_SIZE, PAGE_HYPERVISOR);
> + ASSERT(((unsigned long) va & ~PAGE_MASK) == 0);
> + xsave_addr = (void *)((unsigned long)va +
> + (regs->edi & ~PAGE_MASK));
> + }
> + else
> + xsave_addr = do_page_walk(v, regs->edi);
> +
> + if ( !xsave_addr )
> + goto fail;
> +
> + xsaves(regs->eax, regs->edx, xsave_addr);
> +
> + if ( not_page_aligned )
> + vunmap((void *)((unsigned long)xsave_addr & PAGE_MASK));
> + else
> + unmap_domain_page(xsave_addr);
> + break;
> + }
> + case 0x1f:/* XRSTORS */
> + {
> + if( (regs->edi & ~PAGE_MASK) + guest_xsaves_size > PAGE_SIZE )
> + {
> + mfn_t mfn_list[2];
> + void *va;
> +
> + not_page_aligned = 1;
> + mfn_list[0] = _mfn(do_page_walk_mfn(v, regs->edi));
> + mfn_list[1] = _mfn(do_page_walk_mfn(v,
> + PAGE_ALIGN(regs->edi)));
> +
> + va = __vmap(mfn_list, 1, 2, PAGE_SIZE, PAGE_HYPERVISOR);
> + ASSERT(((unsigned long) va & ~PAGE_MASK) == 0);
Ouch! Crash the hypervisor?
> + xsave_addr = (void *)((unsigned long)va +
> + (regs->edi & ~PAGE_MASK));
> + }
> + else
> + xsave_addr = do_page_walk(v, regs->edi);
> +
> + if ( !xsave_addr )
> + goto fail;
> +
> + xrstors(regs->eax, regs->edx, xsave_addr);
> +
> + if ( not_page_aligned )
> + vunmap((void *)((unsigned long)xsave_addr & PAGE_MASK));
> + else
> + unmap_domain_page(xsave_addr);
> + break;
> + }
> + default:
> + goto fail;
> + }
> + break;
> + }
> +
> case 0x06: /* CLTS */
> (void)do_fpu_taskswitch(0);
> break;
> @@ -2638,6 +2714,12 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
> wrmsrl(regs->_ecx, msr_content);
> break;
>
> + case MSR_IA32_XSS:
> + if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
> + goto fail;
> + v->arch.msr_ia32_xss = msr_content;
> + break;
> +
> default:
> if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
> break;
> @@ -2740,6 +2822,11 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
> regs->edx = 0;
> break;
>
> + case MSR_IA32_XSS:
> + regs->eax = v->arch.msr_ia32_xss;
> + regs->edx = v->arch.msr_ia32_xss >> 32;
> + break;
> +
> default:
> if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
> goto rdmsr_writeback; diff --git
> a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c index
> 3ef4618..f64aa08 100644
> --- a/xen/arch/x86/x86_64/mm.c
> +++ b/xen/arch/x86/x86_64/mm.c
> @@ -48,6 +48,58 @@ l2_pgentry_t __section(".bss.page_aligned")
> l2_bootmap[L2_PAGETABLE_ENTRIES];
>
> l2_pgentry_t *compat_idle_pg_table_l2;
>
> +unsigned long do_page_walk_mfn(struct vcpu *v, unsigned long addr) {
> + unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
> + l4_pgentry_t l4e, *l4t;
> + l3_pgentry_t l3e, *l3t;
> + l2_pgentry_t l2e, *l2t;
> + l1_pgentry_t l1e, *l1t;
> +
> + if ( !is_pv_vcpu(v) || !is_canonical_address(addr) )
> + return 0;
> +
> + l4t = map_domain_page(mfn);
> + l4e = l4t[l4_table_offset(addr)];
> + unmap_domain_page(l4t);
> + if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
> + return 0;
> +
> + l3t = map_l3t_from_l4e(l4e);
> + l3e = l3t[l3_table_offset(addr)];
> + unmap_domain_page(l3t);
> + mfn = l3e_get_pfn(l3e);
> + if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> + return 0;
> + if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
> + {
> + mfn += PFN_DOWN(addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));
> + goto ret;
> + }
> +
> + l2t = map_domain_page(mfn);
> + l2e = l2t[l2_table_offset(addr)];
> + unmap_domain_page(l2t);
> + mfn = l2e_get_pfn(l2e);
> + if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> + return 0;
> + if ( (l2e_get_flags(l2e) & _PAGE_PSE) )
> + {
> + mfn += PFN_DOWN(addr & ((1UL << L2_PAGETABLE_SHIFT) - 1));
> + goto ret;
> + }
> +
> + l1t = map_domain_page(mfn);
> + l1e = l1t[l1_table_offset(addr)];
> + unmap_domain_page(l1t);
> + mfn = l1e_get_pfn(l1e);
> + if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> + return 0;
> +
> + ret:
> + return mfn;
> +}
> +
> void *do_page_walk(struct vcpu *v, unsigned long addr) {
> unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
> diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c index
> d5f5e3b..e34eda3 100644
> --- a/xen/arch/x86/xstate.c
> +++ b/xen/arch/x86/xstate.c
> @@ -65,6 +65,31 @@ uint64_t get_xcr0(void)
> return this_cpu(xcr0);
> }
>
> +void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr)
> +{
> + asm volatile ( ".byte 0x48,0x0f,0xc7,0x2f"
> + : "=m" (*ptr)
> + : "a" (lmask), "d" (hmask), "D" (ptr) ); }
> +
> +void xrstors(uint32_t lmask, uint32_t hmask, struct xsave_struct
> +*ptr) {
> + asm volatile ( "1: .byte 0x48,0x0f,0xc7,0x1f\n"
> + ".section .fixup,\"ax\" \n"
> + "2: mov %5,%%ecx \n"
> + " xor %1,%1 \n"
> + " rep stosb \n"
> + " lea %2,%0 \n"
> + " mov %3,%1 \n"
> + " jmp 1b \n"
> + ".previous \n"
> + _ASM_EXTABLE(1b, 2b)
> + : "+&D" (ptr), "+&a" (lmask)
> + : "m" (*ptr), "g" (lmask), "d" (hmask),
> + "m" (xsave_cntxt_size)
> + : "ecx" );
> +}
> +
> void xsave(struct vcpu *v, uint64_t mask) {
> struct xsave_struct *ptr = v->arch.xsave_area; @@ -268,6 +293,20
> @@ static unsigned int _xstate_ctxt_size(u64 xcr0)
> return ebx;
> }
>
> +unsigned int xstate_ctxt_size_compact(u64 xcr0) {
> + u64 act_xcr0 = get_xcr0();
> + u32 eax, ebx = 0, ecx, edx;
> + bool_t ok = set_xcr0(xcr0);
> +
> + ASSERT(ok);
> + cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
> + ok = set_xcr0(act_xcr0);
> + ASSERT(ok);
> +
> + return ebx;
> +}
> +
> /* Fastpath for common xstate size requests, avoiding reloads of
> xcr0. */ unsigned int xstate_ctxt_size(u64 xcr0) { diff --git
> a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h index
> 96bde65..bcea9d4 100644
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -473,6 +473,7 @@ struct arch_vcpu
> */
> struct xsave_struct *xsave_area;
> uint64_t xcr0;
> + u64 msr_ia32_xss;
> /* Accumulated eXtended features mask for using XSAVE/XRESTORE by Xen
> * itself, as we can never know whether guest OS depends on content
> * preservation whenever guest OS clears one feature flag (for
> example, diff --git a/xen/include/asm-x86/mm.h
> b/xen/include/asm-x86/mm.h index 8595c38..94a590e 100644
> --- a/xen/include/asm-x86/mm.h
> +++ b/xen/include/asm-x86/mm.h
> @@ -524,6 +524,7 @@ void make_cr3(struct vcpu *v, unsigned long mfn);
> void update_cr3(struct vcpu *v); int vcpu_destroy_pagetables(struct
> vcpu *); struct trap_bounce *propagate_page_fault(unsigned long addr,
> u16 error_code);
> +unsigned long do_page_walk_mfn(struct vcpu *v, unsigned long addr);
> void *do_page_walk(struct vcpu *v, unsigned long addr);
>
> int __sync_local_execstate(void);
> diff --git a/xen/include/asm-x86/msr-index.h
> b/xen/include/asm-x86/msr-index.h index 83f2f70..9564113 100644
> --- a/xen/include/asm-x86/msr-index.h
> +++ b/xen/include/asm-x86/msr-index.h
> @@ -58,6 +58,8 @@
>
> #define MSR_IA32_BNDCFGS 0x00000D90
>
> +#define MSR_IA32_XSS 0x00000da0
> +
> #define MSR_MTRRfix64K_00000 0x00000250
> #define MSR_MTRRfix16K_80000 0x00000258
> #define MSR_MTRRfix16K_A0000 0x00000259
> diff --git a/xen/include/asm-x86/xstate.h
> b/xen/include/asm-x86/xstate.h index 4c690db..59c7156 100644
> --- a/xen/include/asm-x86/xstate.h
> +++ b/xen/include/asm-x86/xstate.h
> @@ -82,6 +82,8 @@ struct __packed __attribute__((aligned (64)))
> xsave_struct
> /* extended state operations */
> bool_t __must_check set_xcr0(u64 xfeatures); uint64_t
> get_xcr0(void);
> +void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct
> +*ptr); void xrstors(uint32_t lmask, uint32_t hmask, struct
> +xsave_struct *ptr);
> void xsave(struct vcpu *v, uint64_t mask); void xrstor(struct vcpu
> *v, uint64_t mask); bool_t xsave_enabled(const struct vcpu *v); @@
> -92,6 +94,7 @@ int __must_check handle_xsetbv(u32 index, u64 new_bv);
> void xstate_free_save_area(struct vcpu *v); int
> xstate_alloc_save_area(struct vcpu *v); void xstate_init(bool_t bsp);
> +unsigned int xstate_ctxt_size_compact(u64 xcr0);
> unsigned int xstate_ctxt_size(u64 xcr0);
>
> #endif /* __ASM_XSTATE_H */
> --
> 1.9.1
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH 1/6] x86/xsaves: enable xsaves/xrstors for pv guest
2015-07-21 9:43 ` Ruan, Shuai
@ 2015-07-21 13:12 ` Konrad Rzeszutek Wilk
0 siblings, 0 replies; 16+ messages in thread
From: Konrad Rzeszutek Wilk @ 2015-07-21 13:12 UTC (permalink / raw)
To: Ruan, Shuai
Cc: Tian, Kevin, wei.liu2, Ian.Campbell, stefano.stabellini,
andrew.cooper3, ian.jackson, xen-devel, jbeulich, Dong, Eddie,
Nakajima, Jun, keir
On Tue, Jul 21, 2015 at 09:43:22AM +0000, Ruan, Shuai wrote:
> Thanks for your review, konrad
>
> 1.If the hardware does not support xsaves, then hypersior will not expose xsaves feature to guest. Then the guest will not excute xsaves. But your suggestion is important, I will add code to verify that the host can excute xsaves or not.
That is a wrong way to think about it. Think of a malicious guest doing everything wrong. Including
ignoring the cpuid ops and executing all opcodes.
>
> 2.Using 'ASSERT' here is improper. So I will fix these in next version.
>
> Thanks
>
> -----Original Message-----
> From: Konrad Rzeszutek Wilk [mailto:konrad.wilk@oracle.com]
> Sent: Saturday, July 18, 2015 12:22 AM
> To: Ruan, Shuai
> Cc: xen-devel@lists.xen.org; Tian, Kevin; wei.liu2@citrix.com; Ian.Campbell@citrix.com; stefano.stabellini@eu.citrix.com; Nakajima, Jun; andrew.cooper3@citrix.com; ian.jackson@eu.citrix.com; Dong, Eddie; jbeulich@suse.com; keir@xen.org
> Subject: Re: [Xen-devel] [PATCH 1/6] x86/xsaves: enable xsaves/xrstors for pv guest
>
> On Fri, Jul 17, 2015 at 03:26:51PM +0800, Shuai Ruan wrote:
> > This patch emualtes xsaves/xrstors instructions and
>
> emulates
> > XSS msr access.
> >
> > As xsaves/xrstors instructions and XSS msr access required be executed
> > only in ring0. So emulation are needed when pv guest uses these
> > instructions.
>
> This looks to try the emulation even if the hardware does not support it.
>
> That is - and guest could try these opcodes and we would end up trying to execute the xsaves in the hypervisor.
>
> Perhaps first we should verify that the host can actually execute this?
> >
> > Signed-off-by: Shuai Ruan <shuai.ruan@intel.com>
> > ---
> > xen/arch/x86/domain.c | 3 ++
> > xen/arch/x86/traps.c | 87 +++++++++++++++++++++++++++++++++++++++++
> > xen/arch/x86/x86_64/mm.c | 52 ++++++++++++++++++++++++
> > xen/arch/x86/xstate.c | 39 ++++++++++++++++++
> > xen/include/asm-x86/domain.h | 1 +
> > xen/include/asm-x86/mm.h | 1 +
> > xen/include/asm-x86/msr-index.h | 2 +
> > xen/include/asm-x86/xstate.h | 3 ++
> > 8 files changed, 188 insertions(+)
> >
> > diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index
> > a8fe046..66f8231 100644
> > --- a/xen/arch/x86/domain.c
> > +++ b/xen/arch/x86/domain.c
> > @@ -426,6 +426,7 @@ int vcpu_initialise(struct vcpu *v)
> >
> > /* By default, do not emulate */
> > v->arch.vm_event.emulate_flags = 0;
> > + v->arch.msr_ia32_xss = 0;
> >
> > rc = mapcache_vcpu_init(v);
> > if ( rc )
> > @@ -1494,6 +1495,8 @@ static void __context_switch(void)
> > if ( xcr0 != get_xcr0() && !set_xcr0(xcr0) )
> > BUG();
> > }
> > + if ( cpu_has_xsaves )
> > + wrmsr_safe(MSR_IA32_XSS, n->arch.msr_ia32_xss);
> > vcpu_restore_fpu_eager(n);
> > n->arch.ctxt_switch_to(n);
> > }
> > diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index
> > ac62f20..5f79f07 100644
> > --- a/xen/arch/x86/traps.c
> > +++ b/xen/arch/x86/traps.c
> > @@ -2346,6 +2346,82 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
> > }
> > break;
> >
> > + case 0xc7:
> > + {
> > + void *xsave_addr;
> > + int not_page_aligned = 0;
> > + u32 guest_xsaves_size =
> > + xstate_ctxt_size_compact(v->arch.xcr0);
> > +
> > + switch ( insn_fetch(u8, code_base, eip, code_limit) )
> > + {
> > + case 0x2f:/* XSAVES */
> > + {
> > + if ( (regs->edi & ~PAGE_MASK) + guest_xsaves_size > PAGE_SIZE )
> > + {
> > + mfn_t mfn_list[2];
> > + void *va;
> > +
> > + not_page_aligned = 1;
> > + mfn_list[0] = _mfn(do_page_walk_mfn(v, regs->edi));
> > + mfn_list[1] = _mfn(do_page_walk_mfn(v,
> > + PAGE_ALIGN(regs->edi)));
> > +
> > + va = __vmap(mfn_list, 1, 2, PAGE_SIZE, PAGE_HYPERVISOR);
> > + ASSERT(((unsigned long) va & ~PAGE_MASK) == 0);
> > + xsave_addr = (void *)((unsigned long)va +
> > + (regs->edi & ~PAGE_MASK));
> > + }
> > + else
> > + xsave_addr = do_page_walk(v, regs->edi);
> > +
> > + if ( !xsave_addr )
> > + goto fail;
> > +
> > + xsaves(regs->eax, regs->edx, xsave_addr);
> > +
> > + if ( not_page_aligned )
> > + vunmap((void *)((unsigned long)xsave_addr & PAGE_MASK));
> > + else
> > + unmap_domain_page(xsave_addr);
> > + break;
> > + }
> > + case 0x1f:/* XRSTORS */
> > + {
> > + if( (regs->edi & ~PAGE_MASK) + guest_xsaves_size > PAGE_SIZE )
> > + {
> > + mfn_t mfn_list[2];
> > + void *va;
> > +
> > + not_page_aligned = 1;
> > + mfn_list[0] = _mfn(do_page_walk_mfn(v, regs->edi));
> > + mfn_list[1] = _mfn(do_page_walk_mfn(v,
> > + PAGE_ALIGN(regs->edi)));
> > +
> > + va = __vmap(mfn_list, 1, 2, PAGE_SIZE, PAGE_HYPERVISOR);
> > + ASSERT(((unsigned long) va & ~PAGE_MASK) == 0);
>
> Ouch! Crash the hypervisor?
> > + xsave_addr = (void *)((unsigned long)va +
> > + (regs->edi & ~PAGE_MASK));
> > + }
> > + else
> > + xsave_addr = do_page_walk(v, regs->edi);
> > +
> > + if ( !xsave_addr )
> > + goto fail;
> > +
> > + xrstors(regs->eax, regs->edx, xsave_addr);
> > +
> > + if ( not_page_aligned )
> > + vunmap((void *)((unsigned long)xsave_addr & PAGE_MASK));
> > + else
> > + unmap_domain_page(xsave_addr);
> > + break;
> > + }
> > + default:
> > + goto fail;
> > + }
> > + break;
> > + }
> > +
> > case 0x06: /* CLTS */
> > (void)do_fpu_taskswitch(0);
> > break;
> > @@ -2638,6 +2714,12 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
> > wrmsrl(regs->_ecx, msr_content);
> > break;
> >
> > + case MSR_IA32_XSS:
> > + if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
> > + goto fail;
> > + v->arch.msr_ia32_xss = msr_content;
> > + break;
> > +
> > default:
> > if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
> > break;
> > @@ -2740,6 +2822,11 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
> > regs->edx = 0;
> > break;
> >
> > + case MSR_IA32_XSS:
> > + regs->eax = v->arch.msr_ia32_xss;
> > + regs->edx = v->arch.msr_ia32_xss >> 32;
> > + break;
> > +
> > default:
> > if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
> > goto rdmsr_writeback; diff --git
> > a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c index
> > 3ef4618..f64aa08 100644
> > --- a/xen/arch/x86/x86_64/mm.c
> > +++ b/xen/arch/x86/x86_64/mm.c
> > @@ -48,6 +48,58 @@ l2_pgentry_t __section(".bss.page_aligned")
> > l2_bootmap[L2_PAGETABLE_ENTRIES];
> >
> > l2_pgentry_t *compat_idle_pg_table_l2;
> >
> > +unsigned long do_page_walk_mfn(struct vcpu *v, unsigned long addr) {
> > + unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
> > + l4_pgentry_t l4e, *l4t;
> > + l3_pgentry_t l3e, *l3t;
> > + l2_pgentry_t l2e, *l2t;
> > + l1_pgentry_t l1e, *l1t;
> > +
> > + if ( !is_pv_vcpu(v) || !is_canonical_address(addr) )
> > + return 0;
> > +
> > + l4t = map_domain_page(mfn);
> > + l4e = l4t[l4_table_offset(addr)];
> > + unmap_domain_page(l4t);
> > + if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
> > + return 0;
> > +
> > + l3t = map_l3t_from_l4e(l4e);
> > + l3e = l3t[l3_table_offset(addr)];
> > + unmap_domain_page(l3t);
> > + mfn = l3e_get_pfn(l3e);
> > + if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> > + return 0;
> > + if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
> > + {
> > + mfn += PFN_DOWN(addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));
> > + goto ret;
> > + }
> > +
> > + l2t = map_domain_page(mfn);
> > + l2e = l2t[l2_table_offset(addr)];
> > + unmap_domain_page(l2t);
> > + mfn = l2e_get_pfn(l2e);
> > + if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> > + return 0;
> > + if ( (l2e_get_flags(l2e) & _PAGE_PSE) )
> > + {
> > + mfn += PFN_DOWN(addr & ((1UL << L2_PAGETABLE_SHIFT) - 1));
> > + goto ret;
> > + }
> > +
> > + l1t = map_domain_page(mfn);
> > + l1e = l1t[l1_table_offset(addr)];
> > + unmap_domain_page(l1t);
> > + mfn = l1e_get_pfn(l1e);
> > + if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
> > + return 0;
> > +
> > + ret:
> > + return mfn;
> > +}
> > +
> > void *do_page_walk(struct vcpu *v, unsigned long addr) {
> > unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
> > diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c index
> > d5f5e3b..e34eda3 100644
> > --- a/xen/arch/x86/xstate.c
> > +++ b/xen/arch/x86/xstate.c
> > @@ -65,6 +65,31 @@ uint64_t get_xcr0(void)
> > return this_cpu(xcr0);
> > }
> >
> > +void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct *ptr)
> > +{
> > + asm volatile ( ".byte 0x48,0x0f,0xc7,0x2f"
> > + : "=m" (*ptr)
> > + : "a" (lmask), "d" (hmask), "D" (ptr) ); }
> > +
> > +void xrstors(uint32_t lmask, uint32_t hmask, struct xsave_struct
> > +*ptr) {
> > + asm volatile ( "1: .byte 0x48,0x0f,0xc7,0x1f\n"
> > + ".section .fixup,\"ax\" \n"
> > + "2: mov %5,%%ecx \n"
> > + " xor %1,%1 \n"
> > + " rep stosb \n"
> > + " lea %2,%0 \n"
> > + " mov %3,%1 \n"
> > + " jmp 1b \n"
> > + ".previous \n"
> > + _ASM_EXTABLE(1b, 2b)
> > + : "+&D" (ptr), "+&a" (lmask)
> > + : "m" (*ptr), "g" (lmask), "d" (hmask),
> > + "m" (xsave_cntxt_size)
> > + : "ecx" );
> > +}
> > +
> > void xsave(struct vcpu *v, uint64_t mask) {
> > struct xsave_struct *ptr = v->arch.xsave_area; @@ -268,6 +293,20
> > @@ static unsigned int _xstate_ctxt_size(u64 xcr0)
> > return ebx;
> > }
> >
> > +unsigned int xstate_ctxt_size_compact(u64 xcr0) {
> > + u64 act_xcr0 = get_xcr0();
> > + u32 eax, ebx = 0, ecx, edx;
> > + bool_t ok = set_xcr0(xcr0);
> > +
> > + ASSERT(ok);
> > + cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
> > + ok = set_xcr0(act_xcr0);
> > + ASSERT(ok);
> > +
> > + return ebx;
> > +}
> > +
> > /* Fastpath for common xstate size requests, avoiding reloads of
> > xcr0. */ unsigned int xstate_ctxt_size(u64 xcr0) { diff --git
> > a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h index
> > 96bde65..bcea9d4 100644
> > --- a/xen/include/asm-x86/domain.h
> > +++ b/xen/include/asm-x86/domain.h
> > @@ -473,6 +473,7 @@ struct arch_vcpu
> > */
> > struct xsave_struct *xsave_area;
> > uint64_t xcr0;
> > + u64 msr_ia32_xss;
> > /* Accumulated eXtended features mask for using XSAVE/XRESTORE by Xen
> > * itself, as we can never know whether guest OS depends on content
> > * preservation whenever guest OS clears one feature flag (for
> > example, diff --git a/xen/include/asm-x86/mm.h
> > b/xen/include/asm-x86/mm.h index 8595c38..94a590e 100644
> > --- a/xen/include/asm-x86/mm.h
> > +++ b/xen/include/asm-x86/mm.h
> > @@ -524,6 +524,7 @@ void make_cr3(struct vcpu *v, unsigned long mfn);
> > void update_cr3(struct vcpu *v); int vcpu_destroy_pagetables(struct
> > vcpu *); struct trap_bounce *propagate_page_fault(unsigned long addr,
> > u16 error_code);
> > +unsigned long do_page_walk_mfn(struct vcpu *v, unsigned long addr);
> > void *do_page_walk(struct vcpu *v, unsigned long addr);
> >
> > int __sync_local_execstate(void);
> > diff --git a/xen/include/asm-x86/msr-index.h
> > b/xen/include/asm-x86/msr-index.h index 83f2f70..9564113 100644
> > --- a/xen/include/asm-x86/msr-index.h
> > +++ b/xen/include/asm-x86/msr-index.h
> > @@ -58,6 +58,8 @@
> >
> > #define MSR_IA32_BNDCFGS 0x00000D90
> >
> > +#define MSR_IA32_XSS 0x00000da0
> > +
> > #define MSR_MTRRfix64K_00000 0x00000250
> > #define MSR_MTRRfix16K_80000 0x00000258
> > #define MSR_MTRRfix16K_A0000 0x00000259
> > diff --git a/xen/include/asm-x86/xstate.h
> > b/xen/include/asm-x86/xstate.h index 4c690db..59c7156 100644
> > --- a/xen/include/asm-x86/xstate.h
> > +++ b/xen/include/asm-x86/xstate.h
> > @@ -82,6 +82,8 @@ struct __packed __attribute__((aligned (64)))
> > xsave_struct
> > /* extended state operations */
> > bool_t __must_check set_xcr0(u64 xfeatures); uint64_t
> > get_xcr0(void);
> > +void xsaves(uint32_t lmask, uint32_t hmask, struct xsave_struct
> > +*ptr); void xrstors(uint32_t lmask, uint32_t hmask, struct
> > +xsave_struct *ptr);
> > void xsave(struct vcpu *v, uint64_t mask); void xrstor(struct vcpu
> > *v, uint64_t mask); bool_t xsave_enabled(const struct vcpu *v); @@
> > -92,6 +94,7 @@ int __must_check handle_xsetbv(u32 index, u64 new_bv);
> > void xstate_free_save_area(struct vcpu *v); int
> > xstate_alloc_save_area(struct vcpu *v); void xstate_init(bool_t bsp);
> > +unsigned int xstate_ctxt_size_compact(u64 xcr0);
> > unsigned int xstate_ctxt_size(u64 xcr0);
> >
> > #endif /* __ASM_XSTATE_H */
> > --
> > 1.9.1
> >
> >
> > _______________________________________________
> > Xen-devel mailing list
> > Xen-devel@lists.xen.org
> > http://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 16+ messages in thread