From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: [PATCH 2/4] x86/vMSI-X: drop list lock
Date: Wed, 08 Jun 2016 06:53:35 -0600
Message-ID: <5758316F02000078000F30A6@prv-mh.provo.novell.com>
In-Reply-To: <5758302D02000078000F3087@prv-mh.provo.novell.com>

msixtbl_pt_{,un}register() already run with both the PCI devices lock
and the domain event lock held, so there's no need for the separate
msixtbl_list_lock. Just to be on the safe side, also acquire the domain
event lock in the cleanup function (although I don't think this is
strictly necessary).
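
For illustration only (not part of the change itself): the dependency
on those outer locks could be made explicit with assertions at the top
of both functions. pcidevs_locked() is assumed here as the predicate
for the PCI devices lock; spin_is_locked(&pcidevs_lock) would be the
open-coded equivalent.

    /* Hypothetical sketch -- documents the locking assumption this
     * patch relies on; not part of the actual change. */
    ASSERT(pcidevs_locked());
    ASSERT(spin_is_locked(&d->event_lock));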

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -468,8 +468,6 @@ int msixtbl_pt_register(struct domain *d
 
     pdev = msi_desc->dev;
 
-    spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);
-
     list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
         if ( pdev == entry->pdev )
             goto found;
@@ -480,7 +478,6 @@ int msixtbl_pt_register(struct domain *d
 
 found:
     atomic_inc(&entry->refcnt);
-    spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
     r = 0;
 
 out:
@@ -530,15 +527,10 @@ void msixtbl_pt_unregister(struct domain
 
     pdev = msi_desc->dev;
 
-    spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);
-
     list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
         if ( pdev == entry->pdev )
             goto found;
 
-    spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
-
-
 out:
     spin_unlock_irq(&irq_desc->lock);
     return;
@@ -547,7 +539,6 @@ found:
     if ( !atomic_dec_and_test(&entry->refcnt) )
         del_msixtbl_entry(entry);
 
-    spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
     spin_unlock_irq(&irq_desc->lock);
 }
 
@@ -558,7 +549,6 @@ void msixtbl_init(struct domain *d)
         return;
 
     INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
-    spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
 
     register_mmio_handler(d, &msixtbl_mmio_ops);
 }
@@ -566,21 +556,17 @@ void msixtbl_init(struct domain *d)
 void msixtbl_pt_cleanup(struct domain *d)
 {
     struct msixtbl_entry *entry, *temp;
-    unsigned long flags;
 
     if ( !d->arch.hvm_domain.msixtbl_list.next )
         return;
 
-    /* msixtbl_list_lock must be acquired with irq_disabled for check_lock() */
-    local_irq_save(flags); 
-    spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);
+    spin_lock(&d->event_lock);
 
     list_for_each_entry_safe( entry, temp,
                               &d->arch.hvm_domain.msixtbl_list, list )
         del_msixtbl_entry(entry);
 
-    spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
-    local_irq_restore(flags);
+    spin_unlock(&d->event_lock);
 }
 
 void msix_write_completion(struct vcpu *v)
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -124,7 +124,6 @@ struct hvm_domain {
 
     /* hypervisor intercepted msix table */
     struct list_head       msixtbl_list;
-    spinlock_t             msixtbl_list_lock;
 
     struct viridian_domain viridian;
 




Thread overview: 13+ messages
2016-06-08 12:48 [PATCH 0/4] x86/vMSI-X: misc improvements Jan Beulich
2016-06-08 12:52 ` [PATCH 1/4] x86/vMSI-X: defer intercept handler registration Jan Beulich
2016-06-17 16:13   ` Konrad Rzeszutek Wilk
2016-06-17 16:38     ` Jan Beulich
2016-06-21 17:11   ` Andrew Cooper
2016-06-08 12:53 ` Jan Beulich [this message]
2016-06-21 17:26   ` [PATCH 2/4] x86/vMSI-X: drop list lock Andrew Cooper
2016-06-08 12:54 ` [PATCH 3/4] x86/vMSI-X: drop pci_msix_get_table_len() Jan Beulich
2016-06-21 17:27   ` Andrew Cooper
2016-06-08 12:54 ` [PATCH 4/4] x86/vMSI-X: use generic intercept handler in place of MMIO one Jan Beulich
2016-06-13  8:36   ` Paul Durrant
2016-06-21 17:33   ` Andrew Cooper
2016-06-17  8:20 ` Ping: [PATCH 0/4] x86/vMSI-X: misc improvements Jan Beulich
