From: Roger Pau Monne <roger.pau@citrix.com>
To: <xen-devel@lists.xenproject.org>
Cc: Roger Pau Monne <roger.pau@citrix.com>,
	Jan Beulich <jbeulich@suse.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>, Wei Liu <wl@xen.org>
Subject: [PATCH v2 05/11] x86/vioapic: switch to use the EOI callback mechanism
Date: Wed, 30 Sep 2020 12:41:02 +0200
Message-ID: <20200930104108.35969-6-roger.pau@citrix.com>
In-Reply-To: <20200930104108.35969-1-roger.pau@citrix.com>

Switch the emulated IO-APIC code to use the local APIC EOI callback
mechanism. This allows removing the last hardcoded callback from
vlapic_handle_EOI. Removing the hardcoded vIO-APIC callback also makes
it possible to stop setting the EOI exit bitmap based on the trigger
mode, since all users that require an EOI action now use the newly
introduced callback mechanism.

Move and rename vioapic_update_EOI now that it can be made static.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v1:
 - Remove the trigger mode check in the update_eoi_exit_bitmap call.
 - Register the vlapic callbacks when loading the vIO-APIC state.
 - Reduce scope of ent.
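
For reviewers less familiar with the callback mechanism introduced
earlier in the series, below is a minimal standalone model of the
per-vector register-at-injection / run-at-EOI flow this patch relies
on. The types and helpers (set_irq_callback, handle_eoi,
vioapic_like_eoi) are simplified stand-ins for illustration only, not
the real vlapic_set_irq_callback()/vlapic_handle_EOI() interfaces.

/*
 * Standalone model of the per-vector EOI callback flow (illustrative
 * only: simplified stand-in types and names, not Xen code).
 */
#include <stdio.h>
#include <stddef.h>

#define NR_VECTORS 256

typedef void (*eoi_cb_t)(unsigned int vector, void *data);

static struct {
    eoi_cb_t cb;
    void *data;
} callbacks[NR_VECTORS];

/* Injection side: record the callback to run when the vector is EOI'd. */
static void set_irq_callback(unsigned int vec, eoi_cb_t cb, void *data)
{
    callbacks[vec].cb = cb;
    callbacks[vec].data = data;
}

/* EOI side: consume and invoke the callback, if one was registered. */
static void handle_eoi(unsigned int vec)
{
    eoi_cb_t cb = callbacks[vec].cb;
    void *data = callbacks[vec].data;

    callbacks[vec].cb = NULL;
    callbacks[vec].data = NULL;

    if ( cb )
        cb(vec, data);
}

/* Plays the role of the vIO-APIC's eoi_callback() in this patch. */
static void vioapic_like_eoi(unsigned int vector, void *data)
{
    (void)data;
    printf("EOI of vector %#x: clear remote_irr, maybe re-deliver\n", vector);
}

int main(void)
{
    /* Level-triggered injection registers a callback, edge does not. */
    set_irq_callback(0x30, vioapic_like_eoi, NULL);

    handle_eoi(0x30); /* runs the callback once */
    handle_eoi(0x30); /* nothing registered: spurious EOIs are harmless */

    return 0;
}

In the real code the callback table lives in struct vlapic and is
protected by callback_lock; the model above only illustrates the
register-at-injection / run-at-EOI contract that eoi_callback() is
written against.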
---
 xen/arch/x86/hvm/vioapic.c | 131 ++++++++++++++++++++++++-------------
 xen/arch/x86/hvm/vlapic.c  |   5 +-
 2 files changed, 86 insertions(+), 50 deletions(-)

diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 752fc410db..dce98b1479 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -375,6 +375,50 @@ static const struct hvm_mmio_ops vioapic_mmio_ops = {
     .write = vioapic_write
 };
 
+static void eoi_callback(unsigned int vector, void *data)
+{
+    struct domain *d = current->domain;
+    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
+    unsigned int i;
+
+    ASSERT(has_vioapic(d));
+
+    spin_lock(&d->arch.hvm.irq_lock);
+
+    for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
+    {
+        struct hvm_vioapic *vioapic = domain_vioapic(d, i);
+        unsigned int pin;
+
+        for ( pin = 0; pin < vioapic->nr_pins; pin++ )
+        {
+            union vioapic_redir_entry *ent = &vioapic->redirtbl[pin];
+
+            if ( ent->fields.vector != vector )
+                continue;
+
+            ent->fields.remote_irr = 0;
+
+            if ( is_iommu_enabled(d) )
+            {
+                spin_unlock(&d->arch.hvm.irq_lock);
+                hvm_dpci_eoi(vioapic->base_gsi + pin, ent);
+                spin_lock(&d->arch.hvm.irq_lock);
+            }
+
+            if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
+                 !ent->fields.mask &&
+                 hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
+            {
+                ent->fields.remote_irr = 1;
+                vioapic_deliver(vioapic, pin);
+            }
+        }
+    }
+
+    spin_unlock(&d->arch.hvm.irq_lock);
+}
+
 static void ioapic_inj_irq(
     struct hvm_vioapic *vioapic,
     struct vlapic *target,
@@ -388,7 +432,8 @@ static void ioapic_inj_irq(
     ASSERT((delivery_mode == dest_Fixed) ||
            (delivery_mode == dest_LowestPrio));
 
-    vlapic_set_irq(target, vector, trig_mode);
+    vlapic_set_irq_callback(target, vector, trig_mode,
+                            trig_mode ? eoi_callback : NULL, NULL);
 }
 
 static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
@@ -495,50 +540,6 @@ void vioapic_irq_positive_edge(struct domain *d, unsigned int irq)
     }
 }
 
-void vioapic_update_EOI(unsigned int vector)
-{
-    struct domain *d = current->domain;
-    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
-    union vioapic_redir_entry *ent;
-    unsigned int i;
-
-    ASSERT(has_vioapic(d));
-
-    spin_lock(&d->arch.hvm.irq_lock);
-
-    for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
-    {
-        struct hvm_vioapic *vioapic = domain_vioapic(d, i);
-        unsigned int pin;
-
-        for ( pin = 0; pin < vioapic->nr_pins; pin++ )
-        {
-            ent = &vioapic->redirtbl[pin];
-            if ( ent->fields.vector != vector )
-                continue;
-
-            ent->fields.remote_irr = 0;
-
-            if ( is_iommu_enabled(d) )
-            {
-                spin_unlock(&d->arch.hvm.irq_lock);
-                hvm_dpci_eoi(vioapic->base_gsi + pin, ent);
-                spin_lock(&d->arch.hvm.irq_lock);
-            }
-
-            if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
-                 !ent->fields.mask &&
-                 hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
-            {
-                ent->fields.remote_irr = 1;
-                vioapic_deliver(vioapic, pin);
-            }
-        }
-    }
-
-    spin_unlock(&d->arch.hvm.irq_lock);
-}
-
 int vioapic_get_mask(const struct domain *d, unsigned int gsi)
 {
     unsigned int pin = 0; /* See gsi_vioapic */
@@ -592,6 +593,8 @@ static int ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
 static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
 {
     struct hvm_vioapic *s;
+    unsigned int i;
+    int rc;
 
     if ( !has_vioapic(d) )
         return -ENODEV;
@@ -602,7 +605,43 @@ static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
          d->arch.hvm.nr_vioapics != 1 )
         return -EOPNOTSUPP;
 
-    return hvm_load_entry(IOAPIC, h, &s->domU);
+    rc = hvm_load_entry(IOAPIC, h, &s->domU);
+    if ( rc )
+        return rc;
+
+    for ( i = 0; i < ARRAY_SIZE(s->domU.redirtbl); i++ )
+    {
+        const union vioapic_redir_entry *ent = &s->domU.redirtbl[i];
+        unsigned int vector = ent->fields.vector;
+        unsigned int delivery_mode = ent->fields.delivery_mode;
+        struct vcpu *v;
+
+        /*
+         * Add a callback for each possible vector injected by a redirection
+         * entry.
+         */
+        if ( vector < 16 || !ent->fields.remote_irr ||
+             (delivery_mode != dest_LowestPrio && delivery_mode != dest_Fixed) )
+            continue;
+
+        for_each_vcpu ( d, v )
+        {
+            struct vlapic *vlapic = vcpu_vlapic(v);
+
+            /*
+             * NB: if the vlapic registers were restored before the vio-apic
+             * ones, we could test whether the vector is set in the vlapic IRR
+             * or ISR registers before unconditionally setting the callback.
+             * This is harmless as eoi_callback is capable of dealing with
+             * spurious callbacks.
+             */
+            if ( vlapic_match_dest(vlapic, NULL, 0, ent->fields.dest_id,
+                                   ent->fields.dest_mode) )
+                vlapic_set_callback(vlapic, vector, eoi_callback, NULL);
+        }
+    }
+
+    return 0;
 }
 
 HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 8a18b33428..35f12e0909 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -189,7 +189,7 @@ void vlapic_set_irq_callback(struct vlapic *vlapic, uint8_t vec, uint8_t trig,
 
     if ( hvm_funcs.update_eoi_exit_bitmap )
         alternative_vcall(hvm_funcs.update_eoi_exit_bitmap, target, vec,
-                          trig || callback);
+                          callback);
 
     if ( hvm_funcs.deliver_posted_intr )
         alternative_vcall(hvm_funcs.deliver_posted_intr, target, vec);
@@ -493,9 +493,6 @@ void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector)
     unsigned long flags;
     unsigned int index = vector - 16;
 
-    if ( vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]) )
-        vioapic_update_EOI(vector);
-
     spin_lock_irqsave(&vlapic->callback_lock, flags);
     callback = vlapic->callbacks[index].callback;
     vlapic->callbacks[index].callback = NULL;
-- 
2.28.0



Thread overview: 40+ messages
2020-09-30 10:40 [PATCH v2 00/11] x86/intr: introduce EOI callbacks and fix vPT Roger Pau Monne
2020-09-30 10:40 ` [PATCH v2 01/11] x86/hvm: drop vcpu parameter from vlapic EOI callbacks Roger Pau Monne
2020-09-30 11:30   ` Paul Durrant
2020-10-02  8:48   ` Jan Beulich
2020-10-02  9:24     ` Durrant, Paul
2020-10-02 10:54       ` Wei Liu
2020-10-13 14:08     ` Roger Pau Monné
2020-10-13 14:13       ` Jan Beulich
2020-09-30 10:40 ` [PATCH v2 02/11] x86/hvm: drop domain parameter from vioapic/vpic " Roger Pau Monne
2020-09-30 11:33   ` Paul Durrant
2020-10-02  9:02   ` Jan Beulich
2020-09-30 10:41 ` [PATCH v2 03/11] x86/vlapic: introduce an EOI callback mechanism Roger Pau Monne
2020-09-30 11:49   ` Paul Durrant
2020-10-02  9:22     ` Jan Beulich
2020-10-02  9:39   ` Jan Beulich
2020-10-13 14:30     ` Roger Pau Monné
2020-10-13 15:41       ` Jan Beulich
2020-09-30 10:41 ` [PATCH v2 04/11] x86/vmsi: use the newly introduced EOI callbacks Roger Pau Monne
2020-09-30 11:57   ` Paul Durrant
2020-09-30 13:37     ` Roger Pau Monné
2020-10-02 15:25   ` Jan Beulich
2020-10-13 14:47     ` Roger Pau Monné
2020-10-13 15:42       ` Jan Beulich
2020-09-30 10:41 ` Roger Pau Monne [this message]
2020-09-30 12:09   ` [PATCH v2 05/11] x86/vioapic: switch to use the EOI callback mechanism Paul Durrant
2020-09-30 13:29     ` Roger Pau Monné
2020-10-22 16:12   ` Jan Beulich
2020-09-30 10:41 ` [PATCH v2 06/11] x86/hvm: allowing registering EOI callbacks for GSIs Roger Pau Monne
2020-10-23 12:29   ` Jan Beulich
2020-09-30 10:41 ` [PATCH v2 07/11] x86/dpci: move code Roger Pau Monne
2020-10-23 12:32   ` Jan Beulich
2020-09-30 10:41 ` [PATCH v2 08/11] x86/dpci: switch to use a GSI EOI callback Roger Pau Monne
2020-10-23 12:47   ` Jan Beulich
2020-09-30 10:41 ` [PATCH v2 09/11] x86/vpt: switch interrupt injection model Roger Pau Monne
2020-10-23 14:59   ` Jan Beulich
2020-09-30 10:41 ` [PATCH v2 10/11] x86/vpt: remove vPT timers per-vCPU lists Roger Pau Monne
2020-10-23 15:34   ` Jan Beulich
2020-09-30 10:41 ` [PATCH v2 11/11] x86/vpt: introduce a per-vPT lock Roger Pau Monne
2020-09-30 13:30   ` Roger Pau Monné
2020-10-23 15:42     ` Jan Beulich
