* [PATCH for-4.9 v2] x86/vioapic: allow holes in the GSI range for PVH Dom0
@ 2017-04-18 9:14 Roger Pau Monne
2017-04-18 9:40 ` Jan Beulich
0 siblings, 1 reply; 2+ messages in thread
From: Roger Pau Monne @ 2017-04-18 9:14 UTC (permalink / raw)
To: xen-devel
Cc: Andrew Cooper, Julien Grall, Chao Gao, Jan Beulich, Roger Pau Monne
The current vIO APIC for PVH Dom0 doesn't allow non-contiguous GSIs, which
means that all GSIs must belong to an IO APIC. This doesn't match reality,
where there are systems with non-contiguous GSIs.
In order to fix this add a base_gsi field to each hvm_vioapic struct, in order
to store the base GSI for each emulated IO APIC. For PVH Dom0 those values are
populated based on the hardware ones.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reported-by: Chao Gao <chao.gao@intel.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Chao Gao <chao.gao@intel.com>
---
Changes since v1:
- Calculate the maximum GSI in vioapic_init taking into account the base GSI
of each IO APIC.
- Use the vioapic base_gsi field in the PVH Dom0 build in order to populate
the MADT IO APIC entries.
I think this is a good candidate to be included in 4.9, it's a bugfix, and it's
only used by PVH Dom0, so the risk is low IMHO.
---
xen/arch/x86/hvm/dom0_build.c | 2 +-
xen/arch/x86/hvm/vioapic.c | 45 +++++++++++++++++----------------------
xen/include/asm-x86/hvm/vioapic.h | 1 +
3 files changed, 21 insertions(+), 27 deletions(-)
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index db9be87612..020c355faf 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -719,7 +719,7 @@ static int __init pvh_setup_acpi_madt(struct domain *d, paddr_t *addr)
io_apic->header.length = sizeof(*io_apic);
io_apic->id = domain_vioapic(d, i)->id;
io_apic->address = domain_vioapic(d, i)->base_address;
- io_apic->global_irq_base = io_apic_gsi_base(i);
+ io_apic->global_irq_base = domain_vioapic(d, i)->base_gsi;
io_apic++;
}
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 5157db7a4e..c511fa7db6 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -64,37 +64,23 @@ static struct hvm_vioapic *addr_vioapic(const struct domain *d,
struct hvm_vioapic *gsi_vioapic(const struct domain *d, unsigned int gsi,
unsigned int *pin)
{
- unsigned int i, base_gsi = 0;
+ unsigned int i;
for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
{
struct hvm_vioapic *vioapic = domain_vioapic(d, i);
- if ( gsi >= base_gsi && gsi < base_gsi + vioapic->nr_pins )
+ if ( gsi >= vioapic->base_gsi &&
+ gsi < vioapic->base_gsi + vioapic->nr_pins )
{
- *pin = gsi - base_gsi;
+ *pin = gsi - vioapic->base_gsi;
return vioapic;
}
-
- base_gsi += vioapic->nr_pins;
}
return NULL;
}
-static unsigned int base_gsi(const struct domain *d,
- const struct hvm_vioapic *vioapic)
-{
- unsigned int nr_vioapics = d->arch.hvm_domain.nr_vioapics;
- unsigned int base_gsi = 0, i = 0;
- const struct hvm_vioapic *tmp;
-
- while ( i < nr_vioapics && (tmp = domain_vioapic(d, i++)) != vioapic )
- base_gsi += tmp->nr_pins;
-
- return base_gsi;
-}
-
static uint32_t vioapic_read_indirect(const struct hvm_vioapic *vioapic)
{
uint32_t result = 0;
@@ -180,7 +166,7 @@ static void vioapic_write_redirent(
struct hvm_irq *hvm_irq = hvm_domain_irq(d);
union vioapic_redir_entry *pent, ent;
int unmasked = 0;
- unsigned int gsi = base_gsi(d, vioapic) + idx;
+ unsigned int gsi = vioapic->base_gsi + idx;
spin_lock(&d->arch.hvm_domain.irq_lock);
@@ -340,7 +326,7 @@ static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
struct domain *d = vioapic_domain(vioapic);
struct vlapic *target;
struct vcpu *v;
- unsigned int irq = base_gsi(d, vioapic) + pin;
+ unsigned int irq = vioapic->base_gsi + pin;
ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
@@ -451,7 +437,7 @@ void vioapic_update_EOI(struct domain *d, u8 vector)
{
struct hvm_irq *hvm_irq = hvm_domain_irq(d);
union vioapic_redir_entry *ent;
- unsigned int i, base_gsi = 0;
+ unsigned int i;
ASSERT(has_vioapic(d));
@@ -473,19 +459,18 @@ void vioapic_update_EOI(struct domain *d, u8 vector)
if ( iommu_enabled )
{
spin_unlock(&d->arch.hvm_domain.irq_lock);
- hvm_dpci_eoi(d, base_gsi + pin, ent);
+ hvm_dpci_eoi(d, vioapic->base_gsi + pin, ent);
spin_lock(&d->arch.hvm_domain.irq_lock);
}
if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
!ent->fields.mask &&
- hvm_irq->gsi_assert_count[base_gsi + pin] )
+ hvm_irq->gsi_assert_count[vioapic->base_gsi + pin] )
{
ent->fields.remote_irr = 1;
vioapic_deliver(vioapic, pin);
}
}
- base_gsi += vioapic->nr_pins;
}
spin_unlock(&d->arch.hvm_domain.irq_lock);
@@ -554,6 +539,7 @@ void vioapic_reset(struct domain *d)
{
vioapic->base_address = mp_ioapics[i].mpc_apicaddr;
vioapic->id = mp_ioapics[i].mpc_apicid;
+ vioapic->base_gsi = io_apic_gsi_base(i);
}
vioapic->nr_pins = nr_pins;
vioapic->domain = d;
@@ -590,6 +576,8 @@ int vioapic_init(struct domain *d)
{
unsigned int nr_pins = is_hardware_domain(d) ? nr_ioapic_entries[i] :
ARRAY_SIZE(domain_vioapic(d, 0)->domU.redirtbl);
+ unsigned int base_gsi = is_hardware_domain(d) ? io_apic_gsi_base(i)
+ : 0;
if ( (domain_vioapic(d, i) =
xmalloc_bytes(hvm_vioapic_size(nr_pins))) == NULL )
@@ -598,10 +586,15 @@ int vioapic_init(struct domain *d)
return -ENOMEM;
}
domain_vioapic(d, i)->nr_pins = nr_pins;
- nr_gsis += nr_pins;
+ nr_gsis = max(nr_gsis, base_gsi + nr_pins);
}
- ASSERT(hvm_domain_irq(d)->nr_gsis == nr_gsis);
+ /*
+ * NB: hvm_domain_irq(d)->nr_gsis is actually the highest GSI + 1, but
+ * there might be holes in this range (ie: GSIs that don't belong to any
+ * vIO APIC).
+ */
+ ASSERT(hvm_domain_irq(d)->nr_gsis >= nr_gsis);
d->arch.hvm_domain.nr_vioapics = nr_vioapics;
vioapic_reset(d);
diff --git a/xen/include/asm-x86/hvm/vioapic.h b/xen/include/asm-x86/hvm/vioapic.h
index 8ec91d2bd1..2ceb60eaef 100644
--- a/xen/include/asm-x86/hvm/vioapic.h
+++ b/xen/include/asm-x86/hvm/vioapic.h
@@ -50,6 +50,7 @@
struct hvm_vioapic {
struct domain *domain;
uint32_t nr_pins;
+ unsigned int base_gsi;
union {
XEN_HVM_VIOAPIC(,);
struct hvm_hw_vioapic domU;
--
2.11.0 (Apple Git-81)
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH for-4.9 v2] x86/vioapic: allow holes in the GSI range for PVH Dom0
2017-04-18 9:14 [PATCH for-4.9 v2] x86/vioapic: allow holes in the GSI range for PVH Dom0 Roger Pau Monne
@ 2017-04-18 9:40 ` Jan Beulich
0 siblings, 0 replies; 2+ messages in thread
From: Jan Beulich @ 2017-04-18 9:40 UTC (permalink / raw)
To: Roger Pau Monne; +Cc: Andrew Cooper, Julien Grall, xen-devel, Chao Gao
>>> On 18.04.17 at 11:14, <roger.pau@citrix.com> wrote:
> @@ -554,6 +539,7 @@ void vioapic_reset(struct domain *d)
> {
> vioapic->base_address = mp_ioapics[i].mpc_apicaddr;
> vioapic->id = mp_ioapics[i].mpc_apicid;
> + vioapic->base_gsi = io_apic_gsi_base(i);
The value doesn't change over the lifetime of the vIO-APIC, so I'd
prefer this initialization to be done ...
> @@ -590,6 +576,8 @@ int vioapic_init(struct domain *d)
> {
> unsigned int nr_pins = is_hardware_domain(d) ? nr_ioapic_entries[i] :
> ARRAY_SIZE(domain_vioapic(d, 0)->domU.redirtbl);
> + unsigned int base_gsi = is_hardware_domain(d) ? io_apic_gsi_base(i)
> + : 0;
... somewhere below here, also avoiding to have the same logic
(Dom0 vs DomU) in two places. Perhaps the nr_pins calculation
could then be moved into that new if() (and its else) as well.
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2017-04-18 9:40 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-04-18 9:14 [PATCH for-4.9 v2] x86/vioapic: allow holes in the GSI range for PVH Dom0 Roger Pau Monne
2017-04-18 9:40 ` Jan Beulich
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.