From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Wei Liu <wei.liu2@citrix.com>,
	Roger Pau Monne <roger.pau@citrix.com>
Subject: [PATCH v3 06/15] x86/IRQ: fix locking around vector management
Date: Fri, 17 May 2019 04:47:47 -0600	[thread overview]
Message-ID: <5CDE91530200007800230078@prv1-mh.provo.novell.com> (raw)
In-Reply-To: <5CDE8F5B020000780023005F@prv1-mh.provo.novell.com>

All of __{assign,bind,clear}_irq_vector() manipulate struct irq_desc
fields, and hence ought to be called with the descriptor lock held in
addition to vector_lock. This is currently the case only for
set_desc_affinity() (in the common case) and destroy_irq(), which also
clarifies what the nesting behavior between the two locks has to be.
Reflect the new expectation by having these functions all take a
descriptor as parameter instead of an interrupt number.
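
As a purely illustrative sketch (not part of the patch), the locking
pattern now expected of the adjusted paths looks like this, with the
descriptor lock taken outermost:

    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);  /* outer: per-IRQ descriptor lock */
    spin_lock(&vector_lock);                /* inner: global vector bookkeeping */
    /* ... manipulate desc->arch.vector, per-CPU vector_irq[], etc. ... */
    spin_unlock(&vector_lock);
    spin_unlock_irqrestore(&desc->lock, flags);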

Also take care of the two special cases of calls to set_desc_affinity():
set_ioapic_affinity_irq() and VT-d's dma_msi_set_affinity() also get
called directly, and in these cases the descriptor lock had not been
acquired until now. For set_ioapic_affinity_irq() this means that
acquiring/releasing the IO-APIC lock can then be a plain spin_{,un}lock().
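
A minimal sketch of the resulting caller pattern (mirroring the
setup_ioapic_dest() change below; "mask" and "flags" are merely
placeholders): since the caller now takes desc->lock with interrupts
disabled, the plain form suffices for the IO-APIC lock inside:

    spin_lock_irqsave(&desc->lock, flags);   /* interrupts off from here on */
    set_ioapic_affinity_irq(desc, mask);     /* plain spin_lock(&ioapic_lock) inside */
    spin_unlock_irqrestore(&desc->lock, flags);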

Drop one of the two leading underscores from all three functions at
the same time.

There's one case left where descriptors get manipulated with just
vector_lock held: setup_vector_irq() expects its caller to hold
vector_lock, and hence can't itself acquire the descriptor locks (that
would be the wrong lock order). I don't currently see how to address this.
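
For illustration only (comments, not code from the patch), the ordering
conflict looks like this:

    /*
     * caller:              spin_lock(&vector_lock);
     * setup_vector_irq():  taking desc->lock here would nest it inside
     *                      vector_lock, i.e. invert the desc->lock ->
     *                      vector_lock order established above (ABBA risk);
     * caller:              spin_unlock(&vector_lock);
     */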

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com> [VT-d]
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
---
v3: Also drop one leading underscore from a comment. Re-base.
v2: Also adjust set_ioapic_affinity_irq() and VT-d's
    dma_msi_set_affinity().

--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -550,14 +550,14 @@ static void clear_IO_APIC (void)
 static void
 set_ioapic_affinity_irq(struct irq_desc *desc, const cpumask_t *mask)
 {
-    unsigned long flags;
     unsigned int dest;
     int pin, irq;
     struct irq_pin_list *entry;
 
     irq = desc->irq;
 
-    spin_lock_irqsave(&ioapic_lock, flags);
+    spin_lock(&ioapic_lock);
+
     dest = set_desc_affinity(desc, mask);
     if (dest != BAD_APICID) {
         if ( !x2apic_enabled )
@@ -580,8 +580,8 @@ set_ioapic_affinity_irq(struct irq_desc
             entry = irq_2_pin + entry->next;
         }
     }
-    spin_unlock_irqrestore(&ioapic_lock, flags);
 
+    spin_unlock(&ioapic_lock);
 }
 
 /*
@@ -674,16 +674,19 @@ void /*__init*/ setup_ioapic_dest(void)
     for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
         for (pin = 0; pin < nr_ioapic_entries[ioapic]; pin++) {
             struct irq_desc *desc;
+            unsigned long flags;
 
             irq_entry = find_irq_entry(ioapic, pin, mp_INT);
             if (irq_entry == -1)
                 continue;
             irq = pin_2_irq(irq_entry, ioapic, pin);
             desc = irq_to_desc(irq);
+
+            spin_lock_irqsave(&desc->lock, flags);
             BUG_ON(!cpumask_intersects(desc->arch.cpu_mask, &cpu_online_map));
             set_ioapic_affinity_irq(desc, desc->arch.cpu_mask);
+            spin_unlock_irqrestore(&desc->lock, flags);
         }
-
     }
 }
 
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -27,6 +27,7 @@
 #include <public/physdev.h>
 
 static int parse_irq_vector_map_param(const char *s);
+static void _clear_irq_vector(struct irq_desc *desc);
 
 /* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
 bool __read_mostly opt_noirqbalance;
@@ -136,13 +137,12 @@ static void trace_irq_mask(uint32_t even
     trace_var(event, 1, sizeof(d), &d);
 }
 
-static int __init __bind_irq_vector(int irq, int vector, const cpumask_t *cpu_mask)
+static int __init _bind_irq_vector(struct irq_desc *desc, int vector,
+                                   const cpumask_t *cpu_mask)
 {
     cpumask_t online_mask;
     int cpu;
-    struct irq_desc *desc = irq_to_desc(irq);
 
-    BUG_ON((unsigned)irq >= nr_irqs);
     BUG_ON((unsigned)vector >= NR_VECTORS);
 
     cpumask_and(&online_mask, cpu_mask, &cpu_online_map);
@@ -153,9 +153,9 @@ static int __init __bind_irq_vector(int
         return 0;
     if ( desc->arch.vector != IRQ_VECTOR_UNASSIGNED )
         return -EBUSY;
-    trace_irq_mask(TRC_HW_IRQ_BIND_VECTOR, irq, vector, &online_mask);
+    trace_irq_mask(TRC_HW_IRQ_BIND_VECTOR, desc->irq, vector, &online_mask);
     for_each_cpu(cpu, &online_mask)
-        per_cpu(vector_irq, cpu)[vector] = irq;
+        per_cpu(vector_irq, cpu)[vector] = desc->irq;
     desc->arch.vector = vector;
     cpumask_copy(desc->arch.cpu_mask, &online_mask);
     if ( desc->arch.used_vectors )
@@ -169,12 +169,18 @@ static int __init __bind_irq_vector(int
 
 int __init bind_irq_vector(int irq, int vector, const cpumask_t *cpu_mask)
 {
+    struct irq_desc *desc = irq_to_desc(irq);
     unsigned long flags;
     int ret;
 
-    spin_lock_irqsave(&vector_lock, flags);
-    ret = __bind_irq_vector(irq, vector, cpu_mask);
-    spin_unlock_irqrestore(&vector_lock, flags);
+    BUG_ON((unsigned)irq >= nr_irqs);
+
+    spin_lock_irqsave(&desc->lock, flags);
+    spin_lock(&vector_lock);
+    ret = _bind_irq_vector(desc, vector, cpu_mask);
+    spin_unlock(&vector_lock);
+    spin_unlock_irqrestore(&desc->lock, flags);
+
     return ret;
 }
 
@@ -259,18 +265,20 @@ void destroy_irq(unsigned int irq)
 
     spin_lock_irqsave(&desc->lock, flags);
     desc->handler = &no_irq_type;
-    clear_irq_vector(irq);
+    spin_lock(&vector_lock);
+    _clear_irq_vector(desc);
+    spin_unlock(&vector_lock);
     desc->arch.used_vectors = NULL;
     spin_unlock_irqrestore(&desc->lock, flags);
 
     xfree(action);
 }
 
-static void __clear_irq_vector(int irq)
+static void _clear_irq_vector(struct irq_desc *desc)
 {
-    int cpu, vector, old_vector;
+    unsigned int cpu;
+    int vector, old_vector, irq = desc->irq;
     cpumask_t tmp_mask;
-    struct irq_desc *desc = irq_to_desc(irq);
 
     BUG_ON(!desc->arch.vector);
 
@@ -316,11 +324,14 @@ static void __clear_irq_vector(int irq)
 
 void clear_irq_vector(int irq)
 {
+    struct irq_desc *desc = irq_to_desc(irq);
     unsigned long flags;
 
-    spin_lock_irqsave(&vector_lock, flags);
-    __clear_irq_vector(irq);
-    spin_unlock_irqrestore(&vector_lock, flags);
+    spin_lock_irqsave(&desc->lock, flags);
+    spin_lock(&vector_lock);
+    _clear_irq_vector(desc);
+    spin_unlock(&vector_lock);
+    spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 int irq_to_vector(int irq)
@@ -455,8 +466,7 @@ static vmask_t *irq_get_used_vector_mask
     return ret;
 }
 
-static int __assign_irq_vector(
-    int irq, struct irq_desc *desc, const cpumask_t *mask)
+static int _assign_irq_vector(struct irq_desc *desc, const cpumask_t *mask)
 {
     /*
      * NOTE! The local APIC isn't very good at handling
@@ -470,7 +480,8 @@ static int __assign_irq_vector(
      * 0x80, because int 0x80 is hm, kind of importantish. ;)
      */
     static int current_vector = FIRST_DYNAMIC_VECTOR, current_offset = 0;
-    int cpu, err, old_vector;
+    unsigned int cpu;
+    int err, old_vector, irq = desc->irq;
     vmask_t *irq_used_vectors = NULL;
 
     old_vector = irq_to_vector(irq);
@@ -583,8 +594,12 @@ int assign_irq_vector(int irq, const cpu
     
     BUG_ON(irq >= nr_irqs || irq <0);
 
-    spin_lock_irqsave(&vector_lock, flags);
-    ret = __assign_irq_vector(irq, desc, mask ?: TARGET_CPUS);
+    spin_lock_irqsave(&desc->lock, flags);
+
+    spin_lock(&vector_lock);
+    ret = _assign_irq_vector(desc, mask ?: TARGET_CPUS);
+    spin_unlock(&vector_lock);
+
     if ( !ret )
     {
         ret = desc->arch.vector;
@@ -593,7 +608,8 @@ int assign_irq_vector(int irq, const cpu
         else
             cpumask_setall(desc->affinity);
     }
-    spin_unlock_irqrestore(&vector_lock, flags);
+
+    spin_unlock_irqrestore(&desc->lock, flags);
 
     return ret;
 }
@@ -767,7 +783,6 @@ void irq_complete_move(struct irq_desc *
 
 unsigned int set_desc_affinity(struct irq_desc *desc, const cpumask_t *mask)
 {
-    unsigned int irq;
     int ret;
     unsigned long flags;
     cpumask_t dest_mask;
@@ -775,10 +790,8 @@ unsigned int set_desc_affinity(struct ir
     if (!cpumask_intersects(mask, &cpu_online_map))
         return BAD_APICID;
 
-    irq = desc->irq;
-
     spin_lock_irqsave(&vector_lock, flags);
-    ret = __assign_irq_vector(irq, desc, mask);
+    ret = _assign_irq_vector(desc, mask);
     spin_unlock_irqrestore(&vector_lock, flags);
 
     if (ret < 0)
@@ -2442,7 +2455,7 @@ void fixup_irqs(const cpumask_t *mask, b
 
         /*
          * In order for the affinity adjustment below to be successful, we
-         * need __assign_irq_vector() to succeed. This in particular means
+         * need _assign_irq_vector() to succeed. This in particular means
          * clearing desc->arch.move_in_progress if this would otherwise
          * prevent the function from succeeding. Since there's no way for the
          * flag to get cleared anymore when there's no possible destination
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2134,11 +2134,16 @@ static void adjust_irq_affinity(struct a
     unsigned int node = rhsa ? pxm_to_node(rhsa->proximity_domain)
                              : NUMA_NO_NODE;
     const cpumask_t *cpumask = &cpu_online_map;
+    struct irq_desc *desc;
 
     if ( node < MAX_NUMNODES && node_online(node) &&
          cpumask_intersects(&node_to_cpumask(node), cpumask) )
         cpumask = &node_to_cpumask(node);
-    dma_msi_set_affinity(irq_to_desc(drhd->iommu->msi.irq), cpumask);
+
+    desc = irq_to_desc(drhd->iommu->msi.irq);
+    spin_lock_irq(&desc->lock);
+    dma_msi_set_affinity(desc, cpumask);
+    spin_unlock_irq(&desc->lock);
 }
 
 static int adjust_vtd_irq_affinities(void)




_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
