From: Feng Wu <feng.wu@intel.com>
To: xen-devel@lists.xen.org
Cc: kevin.tian@intel.com, keir@xen.org, george.dunlap@eu.citrix.com,
	andrew.cooper3@citrix.com, dario.faggioli@citrix.com,
	jbeulich@suse.com, Feng Wu <feng.wu@intel.com>
Subject: [PATCH v2 1/4] VMX: Properly handle pi when all the assigned devices are removed
Date: Thu, 26 May 2016 21:39:11 +0800
Message-ID: <1464269954-8056-2-git-send-email-feng.wu@intel.com>
In-Reply-To: <1464269954-8056-1-git-send-email-feng.wu@intel.com>

This patch handles a corner case that arises when the last assigned
device is removed from the domain. In that case the PI descriptor and
the per-CPU blocking list need careful handling, to make sure that:
- All PI descriptors are in the right state the next time a device is
assigned to the domain. This is achieved by keeping all the PI hooks
in place, so the PI descriptor keeps being updated during scheduling
and hence stays up to date.
- No vCPU of the domain remains on a per-CPU blocking list.

(A standalone sketch of the locking pattern used here follows the
diffstat below; it is illustrative only and not part of the patch.)

Signed-off-by: Feng Wu <feng.wu@intel.com>
---
 xen/arch/x86/hvm/vmx/vmx.c         | 75 +++++++++++++++++++++++++++++++-------
 xen/include/asm-x86/hvm/vmx/vmcs.h |  3 ++
 2 files changed, 65 insertions(+), 13 deletions(-)
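
For reviewers: the fragment below is a minimal userspace model (plain
C with pthreads, not Xen code) of the locking pattern this patch
introduces -- a per-vCPU flag tested under pi_hotplug_lock so that
vmx_vcpu_block() can never re-queue a vCPU once
vmx_pi_blocking_cleanup() has run for it. All structure and function
names in the model are made up for illustration; only the behaviour of
the flag/lock pair corresponds to the patch.

/*
 * Standalone model of the synchronisation pattern used in the patch:
 * a per-vCPU "cleaned up" flag tested under a per-vCPU lock, so the
 * blocking path can never re-insert an entry once the cleanup path
 * has torn it down.  Build with: gcc -pthread pi_model.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct blocking_list {
    pthread_mutex_t lock;             /* models the per-CPU list lock */
    int nr_entries;
};

struct vcpu_model {
    pthread_mutex_t hotplug_lock;     /* models pi_hotplug_lock */
    bool cleaned_up;                  /* models pi_blocking_cleaned_up */
    bool on_list;
    struct blocking_list *list;
};

/* Models vmx_vcpu_block(): queue the vCPU only if cleanup hasn't run. */
static void block_vcpu(struct vcpu_model *v)
{
    pthread_mutex_lock(&v->hotplug_lock);
    if ( v->cleaned_up )
    {
        /* Last device already gone; don't add to the blocking list. */
        pthread_mutex_unlock(&v->hotplug_lock);
        return;
    }
    pthread_mutex_lock(&v->list->lock);
    v->on_list = true;
    v->list->nr_entries++;
    pthread_mutex_unlock(&v->list->lock);
    pthread_mutex_unlock(&v->hotplug_lock);
}

/* Models vmx_pi_blocking_cleanup(): mark the vCPU and unlink it. */
static void cleanup_vcpu(struct vcpu_model *v)
{
    pthread_mutex_lock(&v->hotplug_lock);
    v->cleaned_up = true;
    pthread_mutex_lock(&v->list->lock);
    if ( v->on_list )
    {
        v->on_list = false;
        v->list->nr_entries--;
    }
    pthread_mutex_unlock(&v->list->lock);
    pthread_mutex_unlock(&v->hotplug_lock);
}

int main(void)
{
    struct blocking_list l = { .nr_entries = 0 };
    struct vcpu_model v = { .cleaned_up = false, .on_list = false, .list = &l };

    pthread_mutex_init(&l.lock, NULL);
    pthread_mutex_init(&v.hotplug_lock, NULL);

    block_vcpu(&v);    /* vCPU blocks while a device is still assigned */
    cleanup_vcpu(&v);  /* last device removed: vCPU taken off the list */
    block_vcpu(&v);    /* later block attempts are now refused */

    printf("entries left on the list: %d\n", l.nr_entries); /* prints 0 */
    return 0;
}

In the real code the two locks nest in the same order in both paths
(pi_hotplug_lock outer, per-CPU blocking list lock inner), which is
what closes the window where a vCPU could still be queued after its
cleanup has completed.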

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bc4410f..65f5288 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -113,7 +113,19 @@ static void vmx_vcpu_block(struct vcpu *v)
 		&per_cpu(vmx_pi_blocking, v->processor).lock;
     struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc;
 
-    spin_lock_irqsave(pi_blocking_list_lock, flags);
+    spin_lock_irqsave(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
+    if ( unlikely(v->arch.hvm_vmx.pi_blocking_cleaned_up) )
+    {
+        /*
+         * The vCPU is about to be destroyed and, if it was blocking, has
+         * already been removed from the per-CPU blocking list, so don't
+         * add it back here.
+         */
+        spin_unlock_irqrestore(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
+        return;
+    }
+
+    spin_lock(pi_blocking_list_lock);
     old_lock = cmpxchg(&v->arch.hvm_vmx.pi_blocking.lock, NULL,
                        pi_blocking_list_lock);
 
@@ -126,7 +138,9 @@ static void vmx_vcpu_block(struct vcpu *v)
 
     list_add_tail(&v->arch.hvm_vmx.pi_blocking.list,
                   &per_cpu(vmx_pi_blocking, v->processor).list);
-    spin_unlock_irqrestore(pi_blocking_list_lock, flags);
+    spin_unlock(pi_blocking_list_lock);
+
+    spin_unlock_irqrestore(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
 
     ASSERT(!pi_test_sn(pi_desc));
 
@@ -199,32 +213,65 @@ static void vmx_pi_do_resume(struct vcpu *v)
     spin_unlock_irqrestore(pi_blocking_list_lock, flags);
 }
 
+static void vmx_pi_blocking_cleanup(struct vcpu *v)
+{
+    unsigned long flags;
+    spinlock_t *pi_blocking_list_lock;
+
+    if ( !iommu_intpost )
+        return;
+
+    spin_lock_irqsave(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
+    v->arch.hvm_vmx.pi_blocking_cleaned_up = 1;
+
+    pi_blocking_list_lock = v->arch.hvm_vmx.pi_blocking.lock;
+    if ( pi_blocking_list_lock == NULL )
+    {
+        spin_unlock_irqrestore(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
+        return;
+    }
+
+    spin_lock(pi_blocking_list_lock);
+    if ( v->arch.hvm_vmx.pi_blocking.lock != NULL )
+    {
+        ASSERT(v->arch.hvm_vmx.pi_blocking.lock == pi_blocking_list_lock);
+        list_del(&v->arch.hvm_vmx.pi_blocking.list);
+        v->arch.hvm_vmx.pi_blocking.lock = NULL;
+    }
+    spin_unlock(pi_blocking_list_lock);
+    spin_unlock_irqrestore(&v->arch.hvm_vmx.pi_hotplug_lock, flags);
+}
+
 /* This function is called when pcidevs_lock is held */
 void vmx_pi_hooks_assign(struct domain *d)
 {
+    struct vcpu *v;
+
     if ( !iommu_intpost || !has_hvm_container_domain(d) )
         return;
 
-    ASSERT(!d->arch.hvm_domain.vmx.vcpu_block);
+    for_each_vcpu ( d, v )
+        v->arch.hvm_vmx.pi_blocking_cleaned_up = 0;
 
-    d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
-    d->arch.hvm_domain.vmx.pi_switch_from = vmx_pi_switch_from;
-    d->arch.hvm_domain.vmx.pi_switch_to = vmx_pi_switch_to;
-    d->arch.hvm_domain.vmx.pi_do_resume = vmx_pi_do_resume;
+    if ( !d->arch.hvm_domain.vmx.vcpu_block )
+    {
+        d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
+        d->arch.hvm_domain.vmx.pi_switch_from = vmx_pi_switch_from;
+        d->arch.hvm_domain.vmx.pi_switch_to = vmx_pi_switch_to;
+        d->arch.hvm_domain.vmx.pi_do_resume = vmx_pi_do_resume;
+    }
 }
 
 /* This function is called when pcidevs_lock is held */
 void vmx_pi_hooks_deassign(struct domain *d)
 {
+    struct vcpu *v;
+
     if ( !iommu_intpost || !has_hvm_container_domain(d) )
         return;
 
-    ASSERT(d->arch.hvm_domain.vmx.vcpu_block);
-
-    d->arch.hvm_domain.vmx.vcpu_block = NULL;
-    d->arch.hvm_domain.vmx.pi_switch_from = NULL;
-    d->arch.hvm_domain.vmx.pi_switch_to = NULL;
-    d->arch.hvm_domain.vmx.pi_do_resume = NULL;
+    for_each_vcpu ( d, v )
+        vmx_pi_blocking_cleanup(v);
 }
 
 static int vmx_domain_initialise(struct domain *d)
@@ -256,6 +303,8 @@ static int vmx_vcpu_initialise(struct vcpu *v)
 
     INIT_LIST_HEAD(&v->arch.hvm_vmx.pi_blocking.list);
 
+    spin_lock_init(&v->arch.hvm_vmx.pi_hotplug_lock);
+
     v->arch.schedule_tail    = vmx_do_resume;
     v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
     v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index b54f52f..3834f49 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -231,6 +231,9 @@ struct arch_vmx_struct {
      * pCPU and wakeup the related vCPU.
      */
     struct pi_blocking_vcpu pi_blocking;
+
+    spinlock_t            pi_hotplug_lock;
+    bool_t                pi_blocking_cleaned_up;
 };
 
 int vmx_create_vmcs(struct vcpu *v);
-- 
2.1.0


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
