All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] x86: add strictly sanity check for XSAVE/XRSTOR
@ 2011-02-18  2:45 Wei, Gang
  2011-02-18  7:13 ` Keir Fraser
  0 siblings, 1 reply; 5+ messages in thread
From: Wei, Gang @ 2011-02-18  2:45 UTC (permalink / raw)
  To: xen-devel; +Cc: wei.huang2, Keir Fraser, Wei, Gang

[-- Attachment #1: Type: text/plain, Size: 3431 bytes --]

This patch aims to make any future issues around XSAVE/XRSTOR easier to expose.

Jimmy

x86: add strict sanity check for XSAVE/XRSTOR

Signed-off-by: Wei Gang <gang.wei@intel.com>

diff -r 137ad3347504 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/domctl.c	Fri Feb 18 16:00:41 2011 +0800
@@ -1604,8 +1604,13 @@ void arch_get_info_guest(struct vcpu *v,
 
     /* Fill legacy context from xsave area first */
     if ( cpu_has_xsave )
+    {
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
                sizeof(v->arch.guest_context.fpu_ctxt));
+    }
 
     if ( !is_pv_32on64_domain(v->domain) )
         memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat)); diff -r 137ad3347504 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Fri Feb 18 16:03:23 2011 +0800
@@ -777,6 +777,9 @@ static int hvm_load_cpu_ctxt(struct doma
     {
         struct xsave_struct *xsave_area = v->arch.xsave_area;
 
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
         xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
         v->arch.xcr0_accum = XSTATE_FP_SSE; @@ -834,6 +837,7 @@ static int hvm_save_cpu_xsave_states(str
     if ( !cpu_has_xsave )
         return 0;   /* do nothing */
 
+    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
     for_each_vcpu ( d, v )
     {
         if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, HVM_CPU_XSAVE_SIZE) ) @@ -846,8 +850,12 @@ static int hvm_save_cpu_xsave_states(str
         ctxt->xcr0 = v->arch.xcr0;
         ctxt->xcr0_accum = v->arch.xcr0_accum;
         if ( v->fpu_initialised )
+        {
+            ASSERT(v->arch.xsave_area);
+
             memcpy(&ctxt->save_area,
                 v->arch.xsave_area, xsave_cntxt_size);
+        }
     }
 
     return 0;
@@ -873,6 +881,9 @@ static int hvm_load_cpu_xsave_states(str
         gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
         return -EINVAL;
     }
+
+    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+    ASSERT(v->arch.xsave_area);
 
     /* Customized checking for entry since our entry is of variable length */
     desc = (struct hvm_save_descriptor *)&h->data[h->cur]; diff -r 137ad3347504 xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/i387.c	Fri Feb 18 16:00:41 2011 +0800
@@ -71,6 +71,9 @@ void setup_fpu(struct vcpu *v)
 
     if ( cpu_has_xsave )
     {
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         /*
          * XCR0 normally represents what guest OS set. In case of Xen itself, 
          * we set all supported feature mask before doing save/restore.
@@ -118,6 +121,9 @@ void save_init_fpu(struct vcpu *v)
 
     if ( cpu_has_xsave )
     {
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         /* XCR0 normally represents what guest OS set. In case of Xen itself,
          * we set all accumulated feature mask before doing save/restore.
          */

[-- Attachment #2: xsave_sanity_check.patch --]
[-- Type: application/octet-stream, Size: 3241 bytes --]

x86: add strict sanity check for XSAVE/XRSTOR

Signed-off-by: Wei Gang <gang.wei@intel.com>

diff -r 137ad3347504 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/domctl.c	Fri Feb 18 16:00:41 2011 +0800
@@ -1604,8 +1604,13 @@ void arch_get_info_guest(struct vcpu *v,
 
     /* Fill legacy context from xsave area first */
     if ( cpu_has_xsave )
+    {
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
                sizeof(v->arch.guest_context.fpu_ctxt));
+    }
 
     if ( !is_pv_32on64_domain(v->domain) )
         memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
diff -r 137ad3347504 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Fri Feb 18 16:03:23 2011 +0800
@@ -777,6 +777,9 @@ static int hvm_load_cpu_ctxt(struct doma
     {
         struct xsave_struct *xsave_area = v->arch.xsave_area;
 
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
         xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
         v->arch.xcr0_accum = XSTATE_FP_SSE;
@@ -834,6 +837,7 @@ static int hvm_save_cpu_xsave_states(str
     if ( !cpu_has_xsave )
         return 0;   /* do nothing */
 
+    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
     for_each_vcpu ( d, v )
     {
         if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, HVM_CPU_XSAVE_SIZE) )
@@ -846,8 +850,12 @@ static int hvm_save_cpu_xsave_states(str
         ctxt->xcr0 = v->arch.xcr0;
         ctxt->xcr0_accum = v->arch.xcr0_accum;
         if ( v->fpu_initialised )
+        {
+            ASSERT(v->arch.xsave_area);
+
             memcpy(&ctxt->save_area,
                 v->arch.xsave_area, xsave_cntxt_size);
+        }
     }
 
     return 0;
@@ -873,6 +881,9 @@ static int hvm_load_cpu_xsave_states(str
         gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
         return -EINVAL;
     }
+
+    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+    ASSERT(v->arch.xsave_area);
 
     /* Customized checking for entry since our entry is of variable length */
     desc = (struct hvm_save_descriptor *)&h->data[h->cur];
diff -r 137ad3347504 xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/i387.c	Fri Feb 18 16:00:41 2011 +0800
@@ -71,6 +71,9 @@ void setup_fpu(struct vcpu *v)
 
     if ( cpu_has_xsave )
     {
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         /*
          * XCR0 normally represents what guest OS set. In case of Xen itself, 
          * we set all supported feature mask before doing save/restore.
@@ -118,6 +121,9 @@ void save_init_fpu(struct vcpu *v)
 
     if ( cpu_has_xsave )
     {
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+        ASSERT(v->arch.xsave_area);
+
         /* XCR0 normally represents what guest OS set. In case of Xen itself,
          * we set all accumulated feature mask before doing save/restore.
          */

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] x86: add strictly sanity check for XSAVE/XRSTOR
  2011-02-18  2:45 [PATCH] x86: add strictly sanity check for XSAVE/XRSTOR Wei, Gang
@ 2011-02-18  7:13 ` Keir Fraser
  2011-02-19  1:21   ` Kay, Allen M
  0 siblings, 1 reply; 5+ messages in thread
From: Keir Fraser @ 2011-02-18  7:13 UTC (permalink / raw)
  To: Wei, Gang, xen-devel; +Cc: wei.huang2

On 18/02/2011 02:45, "Wei, Gang" <gang.wei@intel.com> wrote:

> This patch is trying to make issues around XSAVE/XRSTOR induced in future easy
> to be exposed.

The fact that xsave_alloc_save_area() is called unconditionally on the vcpu
allocation path suffices I think. It's pretty easy to eyeball that no
successfully initialised non-idle vcpu can have an xsave area smaller than
min_size.

I like assertions a lot, but not carpet bombed all over the code.

 -- Keir


> Jimmy
> 
> x86: add strictly sanity check for XSAVE/XRSTOR
> 
> Signed-off-by: Wei Gang <gang.wei@intel.com>
> 
> diff -r 137ad3347504 xen/arch/x86/domctl.c
> --- a/xen/arch/x86/domctl.c Mon Feb 14 17:02:55 2011 +0000
> +++ b/xen/arch/x86/domctl.c Fri Feb 18 16:00:41 2011 +0800
> @@ -1604,8 +1604,13 @@ void arch_get_info_guest(struct vcpu *v,
>  
>      /* Fill legacy context from xsave area first */
>      if ( cpu_has_xsave )
> +    {
> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> +        ASSERT(v->arch.xsave_area);
> +
>          memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
>                 sizeof(v->arch.guest_context.fpu_ctxt));
> +    }
>  
>      if ( !is_pv_32on64_domain(v->domain) )
>          memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat)); diff -r
> 137ad3347504 xen/arch/x86/hvm/hvm.c
> --- a/xen/arch/x86/hvm/hvm.c Mon Feb 14 17:02:55 2011 +0000
> +++ b/xen/arch/x86/hvm/hvm.c Fri Feb 18 16:03:23 2011 +0800
> @@ -777,6 +777,9 @@ static int hvm_load_cpu_ctxt(struct doma
>      {
>          struct xsave_struct *xsave_area = v->arch.xsave_area;
>  
> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> +        ASSERT(v->arch.xsave_area);
> +
>          memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>          xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
>          v->arch.xcr0_accum = XSTATE_FP_SSE; @@ -834,6 +837,7 @@ static int
> hvm_save_cpu_xsave_states(str
>      if ( !cpu_has_xsave )
>          return 0;   /* do nothing */
>  
> +    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
>      for_each_vcpu ( d, v )
>      {
>          if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id,
> HVM_CPU_XSAVE_SIZE) ) @@ -846,8 +850,12 @@ static int
> hvm_save_cpu_xsave_states(str
>          ctxt->xcr0 = v->arch.xcr0;
>          ctxt->xcr0_accum = v->arch.xcr0_accum;
>          if ( v->fpu_initialised )
> +        {
> +            ASSERT(v->arch.xsave_area);
> +
>              memcpy(&ctxt->save_area,
>                  v->arch.xsave_area, xsave_cntxt_size);
> +        }
>      }
>  
>      return 0;
> @@ -873,6 +881,9 @@ static int hvm_load_cpu_xsave_states(str
>          gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
>          return -EINVAL;
>      }
> +
> +    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> +    ASSERT(v->arch.xsave_area);
>  
>      /* Customized checking for entry since our entry is of variable length */
>      desc = (struct hvm_save_descriptor *)&h->data[h->cur]; diff -r
> 137ad3347504 xen/arch/x86/i387.c
> --- a/xen/arch/x86/i387.c Mon Feb 14 17:02:55 2011 +0000
> +++ b/xen/arch/x86/i387.c Fri Feb 18 16:00:41 2011 +0800
> @@ -71,6 +71,9 @@ void setup_fpu(struct vcpu *v)
>  
>      if ( cpu_has_xsave )
>      {
> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> +        ASSERT(v->arch.xsave_area);
> +
>          /*
>           * XCR0 normally represents what guest OS set. In case of Xen itself,
>           * we set all supported feature mask before doing save/restore.
> @@ -118,6 +121,9 @@ void save_init_fpu(struct vcpu *v)
>  
>      if ( cpu_has_xsave )
>      {
> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> +        ASSERT(v->arch.xsave_area);
> +
>          /* XCR0 normally represents what guest OS set. In case of Xen itself,
>           * we set all accumulated feature mask before doing save/restore.
>           */

^ permalink raw reply	[flat|nested] 5+ messages in thread

* RE: Re: [PATCH] x86: add strictly sanity check for XSAVE/XRSTOR
  2011-02-18  7:13 ` Keir Fraser
@ 2011-02-19  1:21   ` Kay, Allen M
  2011-02-19  8:35     ` Keir Fraser
  0 siblings, 1 reply; 5+ messages in thread
From: Kay, Allen M @ 2011-02-19  1:21 UTC (permalink / raw)
  To: Keir Fraser, Wei, Gang, xen-devel; +Cc: wei.huang2

This patch was meant to address the fact that checking cpu_has_xsave is not enough: it only checks the availability of the feature, but does not check whether the memory has been allocated properly. It is possible for xsave to be used without memory being properly allocated, resulting in clobbering of memory. We have already encountered two random boot failures caused by the xsave patch in the past due to this, so we want to put some safeguard in place to ensure this will not happen again.

Maybe the proper thing to do is to have a new function called xsave_enabled(), which checks whether memory has been allocated properly in addition to checking cpu_has_xsave.

What do you think or do you have a better suggestion?

Allen

-----Original Message-----
From: xen-devel-bounces@lists.xensource.com [mailto:xen-devel-bounces@lists.xensource.com] On Behalf Of Keir Fraser
Sent: Thursday, February 17, 2011 11:13 PM
To: Wei, Gang; xen-devel@lists.xensource.com
Cc: wei.huang2@amd.com
Subject: [Xen-devel] Re: [PATCH] x86: add strictly sanity check for XSAVE/XRSTOR

On 18/02/2011 02:45, "Wei, Gang" <gang.wei@intel.com> wrote:

> This patch is trying to make issues around XSAVE/XRSTOR induced in future easy
> to be exposed.

The fact that xsave_alloc_save_area() is called unconditionally on the vcpu
allocation path suffices I think. It's pretty easy to eyeball that no
successfully initialised non-idle vcpu can have an xsave area smaller than
min_size.

I like assertions a lot, but not carpet bombed all over the code.

 -- Keir


> Jimmy
> 
> x86: add strictly sanity check for XSAVE/XRSTOR
> 
> Signed-off-by: Wei Gang <gang.wei@intel.com>
> 
> diff -r 137ad3347504 xen/arch/x86/domctl.c
> --- a/xen/arch/x86/domctl.c Mon Feb 14 17:02:55 2011 +0000
> +++ b/xen/arch/x86/domctl.c Fri Feb 18 16:00:41 2011 +0800
> @@ -1604,8 +1604,13 @@ void arch_get_info_guest(struct vcpu *v,
>  
>      /* Fill legacy context from xsave area first */
>      if ( cpu_has_xsave )
> +    {
> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> +        ASSERT(v->arch.xsave_area);
> +
>          memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
>                 sizeof(v->arch.guest_context.fpu_ctxt));
> +    }
>  
>      if ( !is_pv_32on64_domain(v->domain) )
>          memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat)); diff -r
> 137ad3347504 xen/arch/x86/hvm/hvm.c
> --- a/xen/arch/x86/hvm/hvm.c Mon Feb 14 17:02:55 2011 +0000
> +++ b/xen/arch/x86/hvm/hvm.c Fri Feb 18 16:03:23 2011 +0800
> @@ -777,6 +777,9 @@ static int hvm_load_cpu_ctxt(struct doma
>      {
>          struct xsave_struct *xsave_area = v->arch.xsave_area;
>  
> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> +        ASSERT(v->arch.xsave_area);
> +
>          memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>          xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
>          v->arch.xcr0_accum = XSTATE_FP_SSE; @@ -834,6 +837,7 @@ static int
> hvm_save_cpu_xsave_states(str
>      if ( !cpu_has_xsave )
>          return 0;   /* do nothing */
>  
> +    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
>      for_each_vcpu ( d, v )
>      {
>          if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id,
> HVM_CPU_XSAVE_SIZE) ) @@ -846,8 +850,12 @@ static int
> hvm_save_cpu_xsave_states(str
>          ctxt->xcr0 = v->arch.xcr0;
>          ctxt->xcr0_accum = v->arch.xcr0_accum;
>          if ( v->fpu_initialised )
> +        {
> +            ASSERT(v->arch.xsave_area);
> +
>              memcpy(&ctxt->save_area,
>                  v->arch.xsave_area, xsave_cntxt_size);
> +        }
>      }
>  
>      return 0;
> @@ -873,6 +881,9 @@ static int hvm_load_cpu_xsave_states(str
>          gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
>          return -EINVAL;
>      }
> +
> +    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> +    ASSERT(v->arch.xsave_area);
>  
>      /* Customized checking for entry since our entry is of variable length */
>      desc = (struct hvm_save_descriptor *)&h->data[h->cur]; diff -r
> 137ad3347504 xen/arch/x86/i387.c
> --- a/xen/arch/x86/i387.c Mon Feb 14 17:02:55 2011 +0000
> +++ b/xen/arch/x86/i387.c Fri Feb 18 16:00:41 2011 +0800
> @@ -71,6 +71,9 @@ void setup_fpu(struct vcpu *v)
>  
>      if ( cpu_has_xsave )
>      {
> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> +        ASSERT(v->arch.xsave_area);
> +
>          /*
>           * XCR0 normally represents what guest OS set. In case of Xen itself,
>           * we set all supported feature mask before doing save/restore.
> @@ -118,6 +121,9 @@ void save_init_fpu(struct vcpu *v)
>  
>      if ( cpu_has_xsave )
>      {
> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> +        ASSERT(v->arch.xsave_area);
> +
>          /* XCR0 normally represents what guest OS set. In case of Xen itself,
>           * we set all accumulated feature mask before doing save/restore.
>           */



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: Re: [PATCH] x86: add strictly sanity check for XSAVE/XRSTOR
  2011-02-19  1:21   ` Kay, Allen M
@ 2011-02-19  8:35     ` Keir Fraser
  2011-02-21  7:06       ` Wei, Gang
  0 siblings, 1 reply; 5+ messages in thread
From: Keir Fraser @ 2011-02-19  8:35 UTC (permalink / raw)
  To: Kay, Allen M, Wei, Gang, xen-devel; +Cc: wei.huang2

On 19/02/2011 01:21, "Kay, Allen M" <allen.m.kay@intel.com> wrote:

> This patch was meant to address that checking cpu_has_xsave is not enough.

Well obviously it is, else the new checks would not be coded as assertions.

> Since it only checks the availability of the feature but it does not check
> whether memory has allocated properly or not.  It is possible that xsave can
> be used without memory being properly allocated and results in clobbering of
> memory.  We have already encountered two random boot failures caused by xsave
> patch in the past due to this so we want to put some safeguard to ensure this
> will not happen again.
> 
> Maybe the proper thing to do is to have a new function call xsave_enabled(),
> this function then checks for whether memory has allocated properly in
> addition to checking cpu_has_xsave.
> 
> What do you think or do you have a better suggestion?

Yeah, a new function xsave_enabled() which encapsulates the check of
cpu_has_xsave, plus your new assertions, would be acceptable I think.

 -- Keir

> Allen
> 
> -----Original Message-----
> From: xen-devel-bounces@lists.xensource.com
> [mailto:xen-devel-bounces@lists.xensource.com] On Behalf Of Keir Fraser
> Sent: Thursday, February 17, 2011 11:13 PM
> To: Wei, Gang; xen-devel@lists.xensource.com
> Cc: wei.huang2@amd.com
> Subject: [Xen-devel] Re: [PATCH] x86: add strictly sanity check for
> XSAVE/XRSTOR
> 
> On 18/02/2011 02:45, "Wei, Gang" <gang.wei@intel.com> wrote:
> 
>> This patch is trying to make issues around XSAVE/XRSTOR induced in future
>> easy
>> to be exposed.
> 
> The fact that xsave_alloc_save_area() is called unconditionally on the vcpu
> allocation path suffices I think. It's pretty easy to eyeball that no
> successfully initialised non-idle vcpu can have an xsave area smaller than
> min_size.
> 
> I like assertions a lot, but not carpet bombed all over the code.
> 
>  -- Keir
> 
> 
>> Jimmy
>> 
>> x86: add strictly sanity check for XSAVE/XRSTOR
>> 
>> Signed-off-by: Wei Gang <gang.wei@intel.com>
>> 
>> diff -r 137ad3347504 xen/arch/x86/domctl.c
>> --- a/xen/arch/x86/domctl.c Mon Feb 14 17:02:55 2011 +0000
>> +++ b/xen/arch/x86/domctl.c Fri Feb 18 16:00:41 2011 +0800
>> @@ -1604,8 +1604,13 @@ void arch_get_info_guest(struct vcpu *v,
>>  
>>      /* Fill legacy context from xsave area first */
>>      if ( cpu_has_xsave )
>> +    {
>> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
>> +        ASSERT(v->arch.xsave_area);
>> +
>>          memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
>>                 sizeof(v->arch.guest_context.fpu_ctxt));
>> +    }
>>  
>>      if ( !is_pv_32on64_domain(v->domain) )
>>          memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat)); diff -r
>> 137ad3347504 xen/arch/x86/hvm/hvm.c
>> --- a/xen/arch/x86/hvm/hvm.c Mon Feb 14 17:02:55 2011 +0000
>> +++ b/xen/arch/x86/hvm/hvm.c Fri Feb 18 16:03:23 2011 +0800
>> @@ -777,6 +777,9 @@ static int hvm_load_cpu_ctxt(struct doma
>>      {
>>          struct xsave_struct *xsave_area = v->arch.xsave_area;
>>  
>> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
>> +        ASSERT(v->arch.xsave_area);
>> +
>>          memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>>          xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
>>          v->arch.xcr0_accum = XSTATE_FP_SSE; @@ -834,6 +837,7 @@ static int
>> hvm_save_cpu_xsave_states(str
>>      if ( !cpu_has_xsave )
>>          return 0;   /* do nothing */
>>  
>> +    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
>>      for_each_vcpu ( d, v )
>>      {
>>          if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id,
>> HVM_CPU_XSAVE_SIZE) ) @@ -846,8 +850,12 @@ static int
>> hvm_save_cpu_xsave_states(str
>>          ctxt->xcr0 = v->arch.xcr0;
>>          ctxt->xcr0_accum = v->arch.xcr0_accum;
>>          if ( v->fpu_initialised )
>> +        {
>> +            ASSERT(v->arch.xsave_area);
>> +
>>              memcpy(&ctxt->save_area,
>>                  v->arch.xsave_area, xsave_cntxt_size);
>> +        }
>>      }
>>  
>>      return 0;
>> @@ -873,6 +881,9 @@ static int hvm_load_cpu_xsave_states(str
>>          gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n",
>> vcpuid);
>>          return -EINVAL;
>>      }
>> +
>> +    ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
>> +    ASSERT(v->arch.xsave_area);
>>  
>>      /* Customized checking for entry since our entry is of variable length
>> */
>>      desc = (struct hvm_save_descriptor *)&h->data[h->cur]; diff -r
>> 137ad3347504 xen/arch/x86/i387.c
>> --- a/xen/arch/x86/i387.c Mon Feb 14 17:02:55 2011 +0000
>> +++ b/xen/arch/x86/i387.c Fri Feb 18 16:00:41 2011 +0800
>> @@ -71,6 +71,9 @@ void setup_fpu(struct vcpu *v)
>>  
>>      if ( cpu_has_xsave )
>>      {
>> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
>> +        ASSERT(v->arch.xsave_area);
>> +
>>          /*
>>           * XCR0 normally represents what guest OS set. In case of Xen
>> itself,
>>           * we set all supported feature mask before doing save/restore.
>> @@ -118,6 +121,9 @@ void save_init_fpu(struct vcpu *v)
>>  
>>      if ( cpu_has_xsave )
>>      {
>> +        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
>> +        ASSERT(v->arch.xsave_area);
>> +
>>          /* XCR0 normally represents what guest OS set. In case of Xen
>> itself,
>>           * we set all accumulated feature mask before doing save/restore.
>>           */
> 
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

* RE: Re: [PATCH] x86: add strictly sanity check for XSAVE/XRSTOR
  2011-02-19  8:35     ` Keir Fraser
@ 2011-02-21  7:06       ` Wei, Gang
  0 siblings, 0 replies; 5+ messages in thread
From: Wei, Gang @ 2011-02-21  7:06 UTC (permalink / raw)
  To: Keir Fraser, Kay, Allen M, xen-devel; +Cc: wei.huang2, Wei, Gang

[-- Attachment #1: Type: text/plain, Size: 8606 bytes --]

Keir Fraser wrote on 2011-02-19:
> On 19/02/2011 01:21, "Kay, Allen M" <allen.m.kay@intel.com> wrote:
>> Maybe the proper thing to do is to have a new function call
>> xsave_enabled(), this function then checks for whether memory has
>> allocated properly in addition to checking cpu_has_xsave.
>> 
>> What do you think or do you have a better suggestion?
> 
> Yeah, a new function xsave_enabled() which encapsulates the check of
> cpu_has_xsave, plus your new assertions, would be acceptable I think.

Good suggestion. Attached is the reworked patch.

Jimmy

x86: add strict sanity check for XSAVE/XRSTOR

Replace most checks on cpu_has_xsave with checks on new fn xsave_enabled(), do additional sanity checks in the new fn.

Signed-off-by: Wei Gang <gang.wei@intel.com>

diff -r 137ad3347504 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/domain.c	Tue Feb 22 19:57:01 2011 +0800
@@ -628,7 +628,7 @@ unsigned long pv_guest_cr4_fixup(const s
         hv_cr4_mask &= ~X86_CR4_DE;
     if ( cpu_has_fsgsbase && !is_pv_32bit_domain(v->domain) )
         hv_cr4_mask &= ~X86_CR4_FSGSBASE;
-    if ( cpu_has_xsave )
+    if ( xsave_enabled(v) )
         hv_cr4_mask &= ~X86_CR4_OSXSAVE;
 
     if ( (guest_cr4 & hv_cr4_mask) != (hv_cr4 & hv_cr4_mask) )
@@ -1402,7 +1402,7 @@ static void __context_switch(void)
         memcpy(stack_regs,
                &n->arch.guest_context.user_regs,
                CTXT_SWITCH_STACK_BYTES);
-        if ( cpu_has_xsave && n->arch.xcr0 != get_xcr0() )
+        if ( xsave_enabled(n) && n->arch.xcr0 != get_xcr0() )
             set_xcr0(n->arch.xcr0);
         n->arch.ctxt_switch_to(n);
     }
diff -r 137ad3347504 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/domctl.c	Tue Feb 22 19:30:01 2011 +0800
@@ -1603,7 +1603,7 @@ void arch_get_info_guest(struct vcpu *v,
 #endif
 
     /* Fill legacy context from xsave area first */
-    if ( cpu_has_xsave )
+    if ( xsave_enabled(v) )
         memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
                sizeof(v->arch.guest_context.fpu_ctxt));
 
diff -r 137ad3347504 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Tue Feb 22 19:58:20 2011 +0800
@@ -773,7 +773,7 @@ static int hvm_load_cpu_ctxt(struct doma
     memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
 
     /* In case xsave-absent save file is restored on a xsave-capable host */
-    if ( cpu_has_xsave )
+    if ( xsave_enabled(v) )
     {
         struct xsave_struct *xsave_area = v->arch.xsave_area;
 
@@ -831,7 +831,7 @@ static int hvm_save_cpu_xsave_states(str
     struct vcpu *v;
     struct hvm_hw_cpu_xsave *ctxt;
 
-    if ( !cpu_has_xsave )
+    if ( !xsave_enabled(NULL) )
         return 0;   /* do nothing */
 
     for_each_vcpu ( d, v )
@@ -846,8 +846,12 @@ static int hvm_save_cpu_xsave_states(str
         ctxt->xcr0 = v->arch.xcr0;
         ctxt->xcr0_accum = v->arch.xcr0_accum;
         if ( v->fpu_initialised )
+        {
+            ASSERT(v->arch.xsave_area);
+
             memcpy(&ctxt->save_area,
                 v->arch.xsave_area, xsave_cntxt_size);
+        }
     }
 
     return 0;
@@ -861,11 +865,6 @@ static int hvm_load_cpu_xsave_states(str
     struct hvm_save_descriptor *desc;
     uint64_t _xfeature_mask;
 
-    /* fails since we can't restore an img saved on xsave-capable host */
-//XXX: 
-    if ( !cpu_has_xsave )
-        return -EINVAL;
-
     /* Which vcpu is this? */
     vcpuid = hvm_load_instance(h);
     if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
@@ -873,6 +872,11 @@ static int hvm_load_cpu_xsave_states(str
         gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
         return -EINVAL;
     }
+
+    /* fails since we can't restore an img saved on xsave-capable host */
+//XXX: 
+    if ( !xsave_enabled(v) )
+        return -EINVAL;
 
     /* Customized checking for entry since our entry is of variable length */
     desc = (struct hvm_save_descriptor *)&h->data[h->cur];
@@ -2208,7 +2212,7 @@ void hvm_cpuid(unsigned int input, unsig
             __clear_bit(X86_FEATURE_APIC & 31, edx);
 
         /* Fix up OSXSAVE. */
-        if ( cpu_has_xsave )
+        if ( xsave_enabled(v) )
             *ecx |= (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE) ?
                      bitmaskof(X86_FEATURE_OSXSAVE) : 0;
         break;
diff -r 137ad3347504 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Tue Feb 22 20:55:28 2011 +0800
@@ -760,7 +760,8 @@ static int construct_vmcs(struct vcpu *v
     /* Host control registers. */
     v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS;
     __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
-    __vmwrite(HOST_CR4, mmu_cr4_features | (cpu_has_xsave ? X86_CR4_OSXSAVE : 0));
+    __vmwrite(HOST_CR4,
+              mmu_cr4_features | (xsave_enabled(v) ? X86_CR4_OSXSAVE : 0));
 
     /* Host CS:RIP. */
     __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
diff -r 137ad3347504 xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/i387.c	Tue Feb 22 19:29:14 2011 +0800
@@ -69,7 +69,7 @@ void setup_fpu(struct vcpu *v)
     if ( v->fpu_dirtied )
         return;
 
-    if ( cpu_has_xsave )
+    if ( xsave_enabled(v) )
     {
         /*
          * XCR0 normally represents what guest OS set. In case of Xen itself, 
@@ -116,7 +116,7 @@ void save_init_fpu(struct vcpu *v)
     if ( cr0 & X86_CR0_TS )
         clts();
 
-    if ( cpu_has_xsave )
+    if ( xsave_enabled(v) )
     {
         /* XCR0 normally represents what guest OS set. In case of Xen itself,
          * we set all accumulated feature mask before doing save/restore.
diff -r 137ad3347504 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/traps.c	Tue Feb 22 20:09:16 2011 +0800
@@ -771,7 +771,7 @@ static void pv_cpuid(struct cpu_user_reg
         __clear_bit(X86_FEATURE_XTPR % 32, &c);
         __clear_bit(X86_FEATURE_PDCM % 32, &c);
         __clear_bit(X86_FEATURE_DCA % 32, &c);
-        if ( !cpu_has_xsave )
+        if ( !xsave_enabled(current) )
         {
             __clear_bit(X86_FEATURE_XSAVE % 32, &c);
             __clear_bit(X86_FEATURE_AVX % 32, &c);
diff -r 137ad3347504 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/include/asm-x86/domain.h	Tue Feb 22 20:10:06 2011 +0800
@@ -464,7 +464,7 @@ unsigned long pv_guest_cr4_fixup(const s
     (((v)->arch.guest_context.ctrlreg[4]                    \
       | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE))    \
       | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0)         \
-      | ((cpu_has_xsave)? X86_CR4_OSXSAVE : 0))              \
+      | ((xsave_enabled(v))? X86_CR4_OSXSAVE : 0))              \
       & ~X86_CR4_DE)
 #define real_cr4_to_pv_guest_cr4(c) \
     ((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD | X86_CR4_OSXSAVE))
diff -r 137ad3347504 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h	Tue Feb 22 20:13:23 2011 +0800
@@ -291,7 +291,7 @@ static inline int hvm_do_pmu_interrupt(s
         X86_CR4_DE  | X86_CR4_PSE | X86_CR4_PAE |       \
         X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
         X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
-        (cpu_has_xsave ? X86_CR4_OSXSAVE : 0))))
+        (xsave_enabled(NULL) ? X86_CR4_OSXSAVE : 0))))
 
 /* These exceptions must always be intercepted. */
 #define HVM_TRAP_MASK ((1U << TRAP_machine_check) | (1U << TRAP_invalid_op))
diff -r 137ad3347504 xen/include/asm-x86/i387.h
--- a/xen/include/asm-x86/i387.h	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/include/asm-x86/i387.h	Tue Feb 22 20:22:07 2011 +0800
@@ -31,6 +31,19 @@ void xsave_free_save_area(struct vcpu *v
 #define XSTATE_YMM_OFFSET  XSAVE_AREA_MIN_SIZE
 #define XSTATE_YMM_SIZE    256
 #define XSAVEOPT        (1 << 0)
+
+static inline int xsave_enabled(const struct vcpu *v)
+{
+    if ( cpu_has_xsave )
+    {
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+
+        if ( v )
+            ASSERT(v->arch.xsave_area);
+    }
+
+    return cpu_has_xsave;	
+}
 
 struct xsave_struct
 {

[-- Attachment #2: xsave_sanity_check_2.patch --]
[-- Type: application/octet-stream, Size: 7832 bytes --]

x86: add strict sanity check for XSAVE/XRSTOR

Replace most checks on cpu_has_xsave with checks on new fn xsave_enabled(), do additional sanity checks in the new fn.

Signed-off-by: Wei Gang <gang.wei@intel.com>

diff -r 137ad3347504 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/domain.c	Tue Feb 22 19:57:01 2011 +0800
@@ -628,7 +628,7 @@ unsigned long pv_guest_cr4_fixup(const s
         hv_cr4_mask &= ~X86_CR4_DE;
     if ( cpu_has_fsgsbase && !is_pv_32bit_domain(v->domain) )
         hv_cr4_mask &= ~X86_CR4_FSGSBASE;
-    if ( cpu_has_xsave )
+    if ( xsave_enabled(v) )
         hv_cr4_mask &= ~X86_CR4_OSXSAVE;
 
     if ( (guest_cr4 & hv_cr4_mask) != (hv_cr4 & hv_cr4_mask) )
@@ -1402,7 +1402,7 @@ static void __context_switch(void)
         memcpy(stack_regs,
                &n->arch.guest_context.user_regs,
                CTXT_SWITCH_STACK_BYTES);
-        if ( cpu_has_xsave && n->arch.xcr0 != get_xcr0() )
+        if ( xsave_enabled(n) && n->arch.xcr0 != get_xcr0() )
             set_xcr0(n->arch.xcr0);
         n->arch.ctxt_switch_to(n);
     }
diff -r 137ad3347504 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/domctl.c	Tue Feb 22 19:30:01 2011 +0800
@@ -1603,7 +1603,7 @@ void arch_get_info_guest(struct vcpu *v,
 #endif
 
     /* Fill legacy context from xsave area first */
-    if ( cpu_has_xsave )
+    if ( xsave_enabled(v) )
         memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
                sizeof(v->arch.guest_context.fpu_ctxt));
 
diff -r 137ad3347504 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Tue Feb 22 19:58:20 2011 +0800
@@ -773,7 +773,7 @@ static int hvm_load_cpu_ctxt(struct doma
     memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
 
     /* In case xsave-absent save file is restored on a xsave-capable host */
-    if ( cpu_has_xsave )
+    if ( xsave_enabled(v) )
     {
         struct xsave_struct *xsave_area = v->arch.xsave_area;
 
@@ -831,7 +831,7 @@ static int hvm_save_cpu_xsave_states(str
     struct vcpu *v;
     struct hvm_hw_cpu_xsave *ctxt;
 
-    if ( !cpu_has_xsave )
+    if ( !xsave_enabled(NULL) )
         return 0;   /* do nothing */
 
     for_each_vcpu ( d, v )
@@ -846,8 +846,12 @@ static int hvm_save_cpu_xsave_states(str
         ctxt->xcr0 = v->arch.xcr0;
         ctxt->xcr0_accum = v->arch.xcr0_accum;
         if ( v->fpu_initialised )
+        {
+            ASSERT(v->arch.xsave_area);
+
             memcpy(&ctxt->save_area,
                 v->arch.xsave_area, xsave_cntxt_size);
+        }
     }
 
     return 0;
@@ -861,11 +865,6 @@ static int hvm_load_cpu_xsave_states(str
     struct hvm_save_descriptor *desc;
     uint64_t _xfeature_mask;
 
-    /* fails since we can't restore an img saved on xsave-capable host */
-//XXX: 
-    if ( !cpu_has_xsave )
-        return -EINVAL;
-
     /* Which vcpu is this? */
     vcpuid = hvm_load_instance(h);
     if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
@@ -873,6 +872,11 @@ static int hvm_load_cpu_xsave_states(str
         gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
         return -EINVAL;
     }
+
+    /* fails since we can't restore an img saved on xsave-capable host */
+//XXX: 
+    if ( !xsave_enabled(v) )
+        return -EINVAL;
 
     /* Customized checking for entry since our entry is of variable length */
     desc = (struct hvm_save_descriptor *)&h->data[h->cur];
@@ -2208,7 +2212,7 @@ void hvm_cpuid(unsigned int input, unsig
             __clear_bit(X86_FEATURE_APIC & 31, edx);
 
         /* Fix up OSXSAVE. */
-        if ( cpu_has_xsave )
+        if ( xsave_enabled(v) )
             *ecx |= (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE) ?
                      bitmaskof(X86_FEATURE_OSXSAVE) : 0;
         break;
diff -r 137ad3347504 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Tue Feb 22 20:55:28 2011 +0800
@@ -760,7 +760,8 @@ static int construct_vmcs(struct vcpu *v
     /* Host control registers. */
     v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS;
     __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
-    __vmwrite(HOST_CR4, mmu_cr4_features | (cpu_has_xsave ? X86_CR4_OSXSAVE : 0));
+    __vmwrite(HOST_CR4,
+              mmu_cr4_features | (xsave_enabled(v) ? X86_CR4_OSXSAVE : 0));
 
     /* Host CS:RIP. */
     __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
diff -r 137ad3347504 xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/i387.c	Tue Feb 22 19:29:14 2011 +0800
@@ -69,7 +69,7 @@ void setup_fpu(struct vcpu *v)
     if ( v->fpu_dirtied )
         return;
 
-    if ( cpu_has_xsave )
+    if ( xsave_enabled(v) )
     {
         /*
          * XCR0 normally represents what guest OS set. In case of Xen itself, 
@@ -116,7 +116,7 @@ void save_init_fpu(struct vcpu *v)
     if ( cr0 & X86_CR0_TS )
         clts();
 
-    if ( cpu_has_xsave )
+    if ( xsave_enabled(v) )
     {
         /* XCR0 normally represents what guest OS set. In case of Xen itself,
          * we set all accumulated feature mask before doing save/restore.
diff -r 137ad3347504 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/traps.c	Tue Feb 22 20:09:16 2011 +0800
@@ -771,7 +771,7 @@ static void pv_cpuid(struct cpu_user_reg
         __clear_bit(X86_FEATURE_XTPR % 32, &c);
         __clear_bit(X86_FEATURE_PDCM % 32, &c);
         __clear_bit(X86_FEATURE_DCA % 32, &c);
-        if ( !cpu_has_xsave )
+        if ( !xsave_enabled(current) )
         {
             __clear_bit(X86_FEATURE_XSAVE % 32, &c);
             __clear_bit(X86_FEATURE_AVX % 32, &c);
diff -r 137ad3347504 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/include/asm-x86/domain.h	Tue Feb 22 20:10:06 2011 +0800
@@ -464,7 +464,7 @@ unsigned long pv_guest_cr4_fixup(const s
     (((v)->arch.guest_context.ctrlreg[4]                    \
       | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE))    \
       | ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0)         \
-      | ((cpu_has_xsave)? X86_CR4_OSXSAVE : 0))              \
+      | ((xsave_enabled(v))? X86_CR4_OSXSAVE : 0))              \
       & ~X86_CR4_DE)
 #define real_cr4_to_pv_guest_cr4(c) \
     ((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD | X86_CR4_OSXSAVE))
diff -r 137ad3347504 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h	Tue Feb 22 20:13:23 2011 +0800
@@ -291,7 +291,7 @@ static inline int hvm_do_pmu_interrupt(s
         X86_CR4_DE  | X86_CR4_PSE | X86_CR4_PAE |       \
         X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
         X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
-        (cpu_has_xsave ? X86_CR4_OSXSAVE : 0))))
+        (xsave_enabled(NULL) ? X86_CR4_OSXSAVE : 0))))
 
 /* These exceptions must always be intercepted. */
 #define HVM_TRAP_MASK ((1U << TRAP_machine_check) | (1U << TRAP_invalid_op))
diff -r 137ad3347504 xen/include/asm-x86/i387.h
--- a/xen/include/asm-x86/i387.h	Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/include/asm-x86/i387.h	Tue Feb 22 20:22:07 2011 +0800
@@ -31,6 +31,19 @@ void xsave_free_save_area(struct vcpu *v
 #define XSTATE_YMM_OFFSET  XSAVE_AREA_MIN_SIZE
 #define XSTATE_YMM_SIZE    256
 #define XSAVEOPT        (1 << 0)
+
+static inline int xsave_enabled(const struct vcpu *v)
+{
+    if ( cpu_has_xsave )
+    {
+        ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+
+        if ( v )
+            ASSERT(v->arch.xsave_area);
+    }
+
+    return cpu_has_xsave;	
+}
 
 struct xsave_struct
 {

[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2011-02-21  7:06 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-02-18  2:45 [PATCH] x86: add strictly sanity check for XSAVE/XRSTOR Wei, Gang
2011-02-18  7:13 ` Keir Fraser
2011-02-19  1:21   ` Kay, Allen M
2011-02-19  8:35     ` Keir Fraser
2011-02-21  7:06       ` Wei, Gang

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.