All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v4 for 4.5] x86/VPMU: Clear last_vcpu when destroying VPMU
@ 2014-12-18 18:06 Boris Ostrovsky
  2014-12-19 13:41 ` Jan Beulich
  2015-01-05 16:33 ` Konrad Rzeszutek Wilk
  0 siblings, 2 replies; 4+ messages in thread
From: Boris Ostrovsky @ 2014-12-18 18:06 UTC (permalink / raw)
  To: jbeulich, keir, konrad.wilk; +Cc: boris.ostrovsky, xen-devel

We need to make sure that last_vcpu is not pointing to VCPU whose
VPMU is being destroyed. Otherwise we may try to dereference it in
the future, when VCPU is gone.

We have to do this via IPI since otherwise there is a (somewhat
theoretical) chance that between test and subsequent clearing
of last_vcpu the remote processor (i.e. vpmu->last_pcpu) might do
both vpmu_load() and then vpmu_save() for another VCPU. The former
will clear last_vcpu and the latter will set it to something else.

Performing this operation via IPI will guarantee that nothing can
happen on the remote processor between testing and clearing of
last_vcpu.

We should also check for VPMU_CONTEXT_ALLOCATED in vpmu_destroy() to
avoid unnecessary percpu tests and arch-specific destroy ops. Thus
checks in AMD and Intel routines are no longer needed.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
 xen/arch/x86/hvm/svm/vpmu.c       |    3 ---
 xen/arch/x86/hvm/vmx/vpmu_core2.c |    2 --
 xen/arch/x86/hvm/vpmu.c           |   20 ++++++++++++++++++++
 3 files changed, 20 insertions(+), 5 deletions(-)

Changes in v4:
 * Back to v2's IPI implementation

Changes in v3:
 * Use cmpxchg instead of IPI
 * Use correct routine names in commit message
 * Remove duplicate test for VPMU_CONTEXT_ALLOCATED in arch-specific
   destroy ops

Changes in v2:
 * Test last_vcpu locally before IPI
 * Don't handle local pcpu as a special case --- on_selected_cpus
   will take care of that
 * Don't cast variables unnecessarily


diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 8e07a98..4c448bb 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -403,9 +403,6 @@ static void amd_vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
-        return;
-
     if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
         amd_vpmu_unset_msr_bitmap(v);
 
diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
index 68b6272..590c2a9 100644
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
@@ -818,8 +818,6 @@ static void core2_vpmu_destroy(struct vcpu *v)
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
 
-    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
-        return;
     xfree(core2_vpmu_cxt->pmu_enable);
     xfree(vpmu->context);
     if ( cpu_has_vmx_msr_bitmap )
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 1df74c2..37f0d9f 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -247,10 +247,30 @@ void vpmu_initialise(struct vcpu *v)
     }
 }
 
+static void vpmu_clear_last(void *arg)
+{
+    if ( this_cpu(last_vcpu) == arg )
+        this_cpu(last_vcpu) = NULL;
+}
+
 void vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
+    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
+        return;
+
+    /*
+     * Need to clear last_vcpu in case it points to v.
+     * We can check here non-atomically whether it is 'v' since
+     * last_vcpu can never become 'v' again at this point.
+     * We will test it again in vpmu_clear_last() with interrupts
+     * disabled to make sure we don't clear someone else.
+     */
+    if ( per_cpu(last_vcpu, vpmu->last_pcpu) == v )
+        on_selected_cpus(cpumask_of(vpmu->last_pcpu),
+                         vpmu_clear_last, v, 1);
+
     if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy )
         vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
 }
-- 
1.7.1

^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH v4 for 4.5] x86/VPMU: Clear last_vcpu when destroying VPMU
  2014-12-18 18:06 [PATCH v4 for 4.5] x86/VPMU: Clear last_vcpu when destroying VPMU Boris Ostrovsky
@ 2014-12-19 13:41 ` Jan Beulich
  2015-01-05 16:33 ` Konrad Rzeszutek Wilk
  1 sibling, 0 replies; 4+ messages in thread
From: Jan Beulich @ 2014-12-19 13:41 UTC (permalink / raw)
  To: Boris Ostrovsky; +Cc: keir, xen-devel

>>> On 18.12.14 at 19:06, <boris.ostrovsky@oracle.com> wrote:
> We need to make sure that last_vcpu is not pointing to VCPU whose
> VPMU is being destroyed. Otherwise we may try to dereference it in
> the future, when VCPU is gone.
> 
> We have to do this via IPI since otherwise there is a (somewhat
> theoretical) chance that between test and subsequent clearing
> of last_vcpu the remote processor (i.e. vpmu->last_pcpu) might do
> both vpmu_load() and then vpmu_save() for another VCPU. The former
> will clear last_vcpu and the latter will set it to something else.
> 
> Performing this operation via IPI will guarantee that nothing can
> happen on the remote processor between testing and clearing of
> last_vcpu.
> 
> We should also check for VPMU_CONTEXT_ALLOCATED in vpmu_destroy() to
> avoid unnecessary percpu tests and arch-specific destroy ops. Thus
> checks in AMD and Intel routines are no longer needed.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>

Acked-by: Jan Beulich <jbeulich@suse.com>

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v4 for 4.5] x86/VPMU: Clear last_vcpu when destroying VPMU
  2014-12-18 18:06 [PATCH v4 for 4.5] x86/VPMU: Clear last_vcpu when destroying VPMU Boris Ostrovsky
  2014-12-19 13:41 ` Jan Beulich
@ 2015-01-05 16:33 ` Konrad Rzeszutek Wilk
  2015-01-07  8:46   ` Tian, Kevin
  1 sibling, 1 reply; 4+ messages in thread
From: Konrad Rzeszutek Wilk @ 2015-01-05 16:33 UTC (permalink / raw)
  To: Boris Ostrovsky, jun.nakajima, eddie.dong, kevin.tian
  Cc: keir, jbeulich, xen-devel

On Thu, Dec 18, 2014 at 01:06:40PM -0500, Boris Ostrovsky wrote:
> We need to make sure that last_vcpu is not pointing to VCPU whose
> VPMU is being destroyed. Otherwise we may try to dereference it in
> the future, when VCPU is gone.
> 
> We have to do this via IPI since otherwise there is a (somewhat
> theoretical) chance that between test and subsequent clearing
> of last_vcpu the remote processor (i.e. vpmu->last_pcpu) might do
> both vpmu_load() and then vpmu_save() for another VCPU. The former
> will clear last_vcpu and the latter will set it to something else.
> 
> Performing this operation via IPI will guarantee that nothing can
> happen on the remote processor between testing and clearing of
> last_vcpu.
> 
> We should also check for VPMU_CONTEXT_ALLOCATED in vpmu_destroy() to
> avoid unnecessary percpu tests and arch-specific destroy ops. Thus
> checks in AMD and Intel routines are no longer needed.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> ---
>  xen/arch/x86/hvm/svm/vpmu.c       |    3 ---
>  xen/arch/x86/hvm/vmx/vpmu_core2.c |    2 --
>  xen/arch/x86/hvm/vpmu.c           |   20 ++++++++++++++++++++
>  3 files changed, 20 insertions(+), 5 deletions(-)

CC-ing the rest of the maintainers (Intel ones, since Boris is
on the AMD side).

I am OK with this patch going in Xen 4.5 as for one thing to actually
use vpmu you have to pass 'vpmu=1' on the Xen command line.

Aka, Release-Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

> 
> Changes in v4:
>  * Back to v2's IPI implementation
> 
> Changes in v3:
>  * Use cmpxchg instead of IPI
>  * Use correct routine names in commit message
>  * Remove duplicate test for VPMU_CONTEXT_ALLOCATED in arch-specific
>    destroy ops
> 
> Changes in v2:
>  * Test last_vcpu locally before IPI
>  * Don't handle local pcpu as a special case --- on_selected_cpus
>    will take care of that
>  * Don't cast variables unnecessarily
> 
> 
> diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
> index 8e07a98..4c448bb 100644
> --- a/xen/arch/x86/hvm/svm/vpmu.c
> +++ b/xen/arch/x86/hvm/svm/vpmu.c
> @@ -403,9 +403,6 @@ static void amd_vpmu_destroy(struct vcpu *v)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
>  
> -    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
> -        return;
> -
>      if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
>          amd_vpmu_unset_msr_bitmap(v);
>  
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index 68b6272..590c2a9 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -818,8 +818,6 @@ static void core2_vpmu_destroy(struct vcpu *v)
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
>      struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
>  
> -    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
> -        return;
>      xfree(core2_vpmu_cxt->pmu_enable);
>      xfree(vpmu->context);
>      if ( cpu_has_vmx_msr_bitmap )
> diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
> index 1df74c2..37f0d9f 100644
> --- a/xen/arch/x86/hvm/vpmu.c
> +++ b/xen/arch/x86/hvm/vpmu.c
> @@ -247,10 +247,30 @@ void vpmu_initialise(struct vcpu *v)
>      }
>  }
>  
> +static void vpmu_clear_last(void *arg)
> +{
> +    if ( this_cpu(last_vcpu) == arg )
> +        this_cpu(last_vcpu) = NULL;
> +}
> +
>  void vpmu_destroy(struct vcpu *v)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
>  
> +    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
> +        return;
> +
> +    /*
> +     * Need to clear last_vcpu in case it points to v.
> +     * We can check here non-atomically whether it is 'v' since
> +     * last_vcpu can never become 'v' again at this point.
> +     * We will test it again in vpmu_clear_last() with interrupts
> +     * disabled to make sure we don't clear someone else.
> +     */
> +    if ( per_cpu(last_vcpu, vpmu->last_pcpu) == v )
> +        on_selected_cpus(cpumask_of(vpmu->last_pcpu),
> +                         vpmu_clear_last, v, 1);
> +
>      if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy )
>          vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
>  }
> -- 
> 1.7.1
> 

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v4 for 4.5] x86/VPMU: Clear last_vcpu when destroying VPMU
  2015-01-05 16:33 ` Konrad Rzeszutek Wilk
@ 2015-01-07  8:46   ` Tian, Kevin
  0 siblings, 0 replies; 4+ messages in thread
From: Tian, Kevin @ 2015-01-07  8:46 UTC (permalink / raw)
  To: Konrad Rzeszutek Wilk, Boris Ostrovsky, Nakajima, Jun, Dong, Eddie
  Cc: keir, jbeulich, xen-devel

> From: Konrad Rzeszutek Wilk [mailto:konrad.wilk@oracle.com]
> Sent: Tuesday, January 06, 2015 12:33 AM
> 
> On Thu, Dec 18, 2014 at 01:06:40PM -0500, Boris Ostrovsky wrote:
> > We need to make sure that last_vcpu is not pointing to VCPU whose
> > VPMU is being destroyed. Otherwise we may try to dereference it in
> > the future, when VCPU is gone.
> >
> > We have to do this via IPI since otherwise there is a (somewhat
> > theoretical) chance that between test and subsequent clearing
> > of last_vcpu the remote processor (i.e. vpmu->last_pcpu) might do
> > both vpmu_load() and then vpmu_save() for another VCPU. The former
> > will clear last_vcpu and the latter will set it to something else.
> >
> > Performing this operation via IPI will guarantee that nothing can
> > happen on the remote processor between testing and clearing of
> > last_vcpu.
> >
> > We should also check for VPMU_CONTEXT_ALLOCATED in vpmu_destroy() to
> > avoid unnecessary percpu tests and arch-specific destroy ops. Thus
> > checks in AMD and Intel routines are no longer needed.
> >
> > Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> > ---
> >  xen/arch/x86/hvm/svm/vpmu.c       |    3 ---
> >  xen/arch/x86/hvm/vmx/vpmu_core2.c |    2 --
> >  xen/arch/x86/hvm/vpmu.c           |   20 ++++++++++++++++++++
> >  3 files changed, 20 insertions(+), 5 deletions(-)
> 
> CC-ing the rest of the maintainers (Intel ones, since Boris is
> on the AMD side).
> 
> I am OK with this patch going in Xen 4.5 as for one thing to actually
> use vpmu you have to pass 'vpmu=1' on the Xen command line.
> 
> Aka, Release-Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Acked-by: Kevin Tian <kevin.tian@intel.com>

> 
> >
> > Changes in v4:
> >  * Back to v2's IPI implementation
> >
> > Changes in v3:
> >  * Use cmpxchg instead of IPI
> >  * Use correct routine names in commit message
> >  * Remove duplicate test for VPMU_CONTEXT_ALLOCATED in arch-specific
> >    destroy ops
> >
> > Changes in v2:
> >  * Test last_vcpu locally before IPI
> >  * Don't handle local pcpu as a special case --- on_selected_cpus
> >    will take care of that
> >  * Don't cast variables unnecessarily
> >
> >
> > diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
> > index 8e07a98..4c448bb 100644
> > --- a/xen/arch/x86/hvm/svm/vpmu.c
> > +++ b/xen/arch/x86/hvm/svm/vpmu.c
> > @@ -403,9 +403,6 @@ static void amd_vpmu_destroy(struct vcpu *v)
> >  {
> >      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> >
> > -    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
> > -        return;
> > -
> >      if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
> >          amd_vpmu_unset_msr_bitmap(v);
> >
> > diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> > index 68b6272..590c2a9 100644
> > --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> > +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> > @@ -818,8 +818,6 @@ static void core2_vpmu_destroy(struct vcpu *v)
> >      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> >      struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
> >
> > -    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
> > -        return;
> >      xfree(core2_vpmu_cxt->pmu_enable);
> >      xfree(vpmu->context);
> >      if ( cpu_has_vmx_msr_bitmap )
> > diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
> > index 1df74c2..37f0d9f 100644
> > --- a/xen/arch/x86/hvm/vpmu.c
> > +++ b/xen/arch/x86/hvm/vpmu.c
> > @@ -247,10 +247,30 @@ void vpmu_initialise(struct vcpu *v)
> >      }
> >  }
> >
> > +static void vpmu_clear_last(void *arg)
> > +{
> > +    if ( this_cpu(last_vcpu) == arg )
> > +        this_cpu(last_vcpu) = NULL;
> > +}
> > +
> >  void vpmu_destroy(struct vcpu *v)
> >  {
> >      struct vpmu_struct *vpmu = vcpu_vpmu(v);
> >
> > +    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
> > +        return;
> > +
> > +    /*
> > +     * Need to clear last_vcpu in case it points to v.
> > +     * We can check here non-atomically whether it is 'v' since
> > +     * last_vcpu can never become 'v' again at this point.
> > +     * We will test it again in vpmu_clear_last() with interrupts
> > +     * disabled to make sure we don't clear someone else.
> > +     */
> > +    if ( per_cpu(last_vcpu, vpmu->last_pcpu) == v )
> > +        on_selected_cpus(cpumask_of(vpmu->last_pcpu),
> > +                         vpmu_clear_last, v, 1);
> > +
> >      if ( vpmu->arch_vpmu_ops &&
> vpmu->arch_vpmu_ops->arch_vpmu_destroy )
> >          vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
> >  }
> > --
> > 1.7.1
> >

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2015-01-07  8:46 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-12-18 18:06 [PATCH v4 for 4.5] x86/VPMU: Clear last_vcpu when destroying VPMU Boris Ostrovsky
2014-12-19 13:41 ` Jan Beulich
2015-01-05 16:33 ` Konrad Rzeszutek Wilk
2015-01-07  8:46   ` Tian, Kevin

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.