From: Shuai Ruan <shuai.ruan@linux.intel.com>
Subject: [PATCH 2/2] x86/xsave: use alternative asm on xsave side
Date: Tue, 2 Feb 2016 15:11:04 +0800
Message-ID: <1454397064-29268-3-git-send-email-shuai.ruan@linux.intel.com>
References: <1454397064-29268-1-git-send-email-shuai.ruan@linux.intel.com>
In-Reply-To: <1454397064-29268-1-git-send-email-shuai.ruan@linux.intel.com>
To: xen-devel@lists.xen.org
Cc: andrew.cooper3@citrix.com, keir@xen.org, jbeulich@suse.com

This patch uses alternative asm on the xsave side. As xsaves uses the
same modified optimization as xsaveopt, xsaves may likewise not write
the FPU portion of the save image, so it needs the same extra tweaks
as xsaveopt.

Signed-off-by: Shuai Ruan <shuai.ruan@linux.intel.com>
---
 xen/arch/x86/xstate.c | 49 ++++++++++++++++---------------------------------
 1 file changed, 16 insertions(+), 33 deletions(-)

diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 4e87ab3..832f4ad 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -248,24 +248,26 @@ void xsave(struct vcpu *v, uint64_t mask)
     uint32_t hmask = mask >> 32;
     uint32_t lmask = mask;
     int word_size = mask & XSTATE_FP ? (cpu_has_fpu_sel ? 8 : 0) : -1;
+#define XSAVE(pfx) \
+    alternative_io_3(".byte " pfx "0x0f,0xae,0x27\n", /* xsave */ \
+                     ".byte " pfx "0x0f,0xae,0x37\n", /* xsaveopt */ \
+                     X86_FEATURE_XSAVEOPT, \
+                     ".byte " pfx "0x0f,0xc7,0x27\n", /* xsavec */ \
+                     X86_FEATURE_XSAVEC, \
+                     ".byte " pfx "0x0f,0xc7,0x2f\n", /* xsaves */ \
+                     X86_FEATURE_XSAVES, \
+                     "=m" (*ptr), \
+                     "a" (lmask), "d" (hmask), "D" (ptr))
 
     if ( word_size <= 0 || !is_pv_32bit_vcpu(v) )
     {
         typeof(ptr->fpu_sse.fip.sel) fcs = ptr->fpu_sse.fip.sel;
         typeof(ptr->fpu_sse.fdp.sel) fds = ptr->fpu_sse.fdp.sel;
 
-        if ( cpu_has_xsaves )
-            asm volatile ( ".byte 0x48,0x0f,0xc7,0x2f"
-                           : "=m" (*ptr)
-                           : "a" (lmask), "d" (hmask), "D" (ptr) );
-        else if ( cpu_has_xsavec )
-            asm volatile ( ".byte 0x48,0x0f,0xc7,0x27"
-                           : "=m" (*ptr)
-                           : "a" (lmask), "d" (hmask), "D" (ptr) );
-        else if ( cpu_has_xsaveopt )
+        if ( cpu_has_xsaveopt || cpu_has_xsaves )
         {
             /*
-             * xsaveopt may not write the FPU portion even when the respective
+             * xsaveopt/xsaves may not write the FPU portion even when the respective
              * mask bit is set. For the check further down to work we hence
              * need to put the save image back into the state that it was in
              * right after the previous xsaveopt.
@@ -277,14 +279,9 @@ void xsave(struct vcpu *v, uint64_t mask)
                 ptr->fpu_sse.fip.sel = 0;
                 ptr->fpu_sse.fdp.sel = 0;
             }
-            asm volatile ( ".byte 0x48,0x0f,0xae,0x37"
-                           : "=m" (*ptr)
-                           : "a" (lmask), "d" (hmask), "D" (ptr) );
         }
-        else
-            asm volatile ( ".byte 0x48,0x0f,0xae,0x27"
-                           : "=m" (*ptr)
-                           : "a" (lmask), "d" (hmask), "D" (ptr) );
+
+        XSAVE("0x48,");
 
         if ( !(mask & ptr->xsave_hdr.xstate_bv & XSTATE_FP) ||
              /*
@@ -315,24 +312,10 @@ void xsave(struct vcpu *v, uint64_t mask)
     }
     else
     {
-        if ( cpu_has_xsaves )
-            asm volatile ( ".byte 0x0f,0xc7,0x2f"
-                           : "=m" (*ptr)
-                           : "a" (lmask), "d" (hmask), "D" (ptr) );
-        else if ( cpu_has_xsavec )
-            asm volatile ( ".byte 0x0f,0xc7,0x27"
-                           : "=m" (*ptr)
-                           : "a" (lmask), "d" (hmask), "D" (ptr) );
-        else if ( cpu_has_xsaveopt )
-            asm volatile ( ".byte 0x0f,0xae,0x37"
-                           : "=m" (*ptr)
-                           : "a" (lmask), "d" (hmask), "D" (ptr) );
-        else
-            asm volatile ( ".byte 0x0f,0xae,0x27"
-                           : "=m" (*ptr)
-                           : "a" (lmask), "d" (hmask), "D" (ptr) );
+        XSAVE("");
         word_size = 4;
     }
+#undef XSAVE
     if ( word_size >= 0 )
         ptr->fpu_sse.x[FPU_WORD_SIZE_OFFSET] = word_size;
 }
-- 
1.9.1
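
For reference (illustrative only, not part of the patch): the raw byte
sequences in the XSAVE() macro decode to the same instructions as the
asm blocks they replace. The small, self-contained program below prints
the mapping from each opcode/ModRM pair to its mnemonic, assuming the
save area pointer is in %rdi as per the "D" constraint. The "0x48,"
passed via pfx is a REX.W prefix selecting the 64-bit forms (full
64-bit FIP/FDP), while XSAVE("") emits the 32-bit forms used for
32-bit PV guests, hence word_size = 4 in that branch.

#include <stdio.h>

int main(void)
{
    /* Second opcode byte (after 0x0f), ModRM byte, and mnemonic. */
    static const struct {
        unsigned char opcode2;
        unsigned char modrm;
        const char *mnemonic;
    } enc[] = {
        { 0xae, 0x27, "xsave (%rdi)" },    /* 0f ae /4 */
        { 0xae, 0x37, "xsaveopt (%rdi)" }, /* 0f ae /6 */
        { 0xc7, 0x27, "xsavec (%rdi)" },   /* 0f c7 /4 */
        { 0xc7, 0x2f, "xsaves (%rdi)" },   /* 0f c7 /5 */
    };
    unsigned int i;

    for ( i = 0; i < sizeof(enc) / sizeof(enc[0]); i++ )
    {
        unsigned int reg = (enc[i].modrm >> 3) & 7; /* the "/r" digit */
        unsigned int rm = enc[i].modrm & 7;         /* 7 -> (%rdi) */

        printf("0f %02x %02x -> /%u rm=%u: %s\n",
               enc[i].opcode2, enc[i].modrm, reg, rm, enc[i].mnemonic);
    }

    return 0;
}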
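
And a loose model of the modified-optimization tweak the commit message
refers to; the types and names below are simplified stand-ins invented
for illustration, not Xen's actual ones:

#include <stdint.h>

#define XSTATE_FP (1ULL << 0)

/* Hypothetical, stripped-down stand-in for the real save image. */
struct save_image {
    uint16_t fcs, fds;  /* x87 code/data segment selectors */
    uint64_t xstate_bv; /* components the save actually wrote */
};

/*
 * Both xsaveopt and xsaves may skip rewriting the FPU portion when
 * that state is unchanged, so the pre-save selector values are
 * stashed and, if the FPU portion was left untouched, put back
 * afterwards so the check further down still sees a consistent image.
 */
void save_with_tweak(struct save_image *ptr, uint64_t mask,
                     void (*do_save)(struct save_image *, uint64_t))
{
    uint16_t fcs = ptr->fcs;
    uint16_t fds = ptr->fds;

    do_save(ptr, mask); /* the patched xsave/xsaveopt/xsavec/xsaves */

    if ( !(mask & ptr->xstate_bv & XSTATE_FP) )
    {
        /* FPU portion not written: restore the stashed selectors. */
        ptr->fcs = fcs;
        ptr->fds = fds;
    }
}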