All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3] arm: support fewer LR registers than virtual irqs
@ 2012-02-15 14:03 Stefano Stabellini
  2012-02-15 14:03 ` [PATCH] arm: replace list_del and INIT_LIST_HEAD with list_del_init Stefano Stabellini
  2012-02-17 12:46 ` [PATCH v3] arm: support fewer LR registers than virtual irqs Ian Campbell
  0 siblings, 2 replies; 8+ messages in thread
From: Stefano Stabellini @ 2012-02-15 14:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Stefano Stabellini, Ian.Campbell, david.vrabel

If the vgic needs to inject a virtual irq into the guest, but no free
LR registers are available, add the irq to a list and return.
Whenever an LR register becomes available we add the queued irq to it
and remove it from the list.
We use the gic lock to protect the list and the bitmask.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
 xen/arch/arm/gic.c           |   61 ++++++++++++++++++++++++++++++++++++++---
 xen/include/asm-arm/domain.h |    1 +
 2 files changed, 57 insertions(+), 5 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index adc10bb..520a400 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -25,6 +25,7 @@
 #include <xen/sched.h>
 #include <xen/errno.h>
 #include <xen/softirq.h>
+#include <xen/list.h>
 #include <asm/p2m.h>
 #include <asm/domain.h>
 
@@ -45,6 +46,8 @@ static struct {
     unsigned int lines;
     unsigned int cpus;
     spinlock_t lock;
+    uint64_t lr_mask;
+    struct list_head lr_pending;
 } gic;
 
 irq_desc_t irq_desc[NR_IRQS];
@@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)
 
     GICH[GICH_HCR] = GICH_HCR_EN;
     GICH[GICH_MISR] = GICH_MISR_EOI;
+    gic.lr_mask = 0ULL;
+    INIT_LIST_HEAD(&gic.lr_pending);
 }
 
 /* Set up the GIC */
@@ -345,16 +350,51 @@ int __init setup_irq(unsigned int irq, struct irqaction *new)
     return rc;
 }
 
-void gic_set_guest_irq(unsigned int virtual_irq,
+static inline void gic_set_lr(int lr, unsigned int virtual_irq,
         unsigned int state, unsigned int priority)
 {
-    BUG_ON(virtual_irq > nr_lrs);
-    GICH[GICH_LR + virtual_irq] = state |
+    BUG_ON(lr > nr_lrs);
+    GICH[GICH_LR + lr] = state |
         GICH_LR_MAINTENANCE_IRQ |
         ((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
         ((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
 }
 
+void gic_set_guest_irq(unsigned int virtual_irq,
+        unsigned int state, unsigned int priority)
+{
+    int i;
+    struct pending_irq *iter, *n;
+
+    spin_lock(&gic.lock);
+
+    if ( list_empty(&gic.lr_pending) )
+    {
+        for (i = 0; i < nr_lrs; i++) {
+            if (!test_and_set_bit(i, &gic.lr_mask))
+            {
+                gic_set_lr(i, virtual_irq, state, priority);
+                spin_unlock(&gic.lock);
+                return;
+            }
+        }
+    }
+
+    n = irq_to_pending(current, virtual_irq);
+    list_for_each_entry ( iter, &gic.lr_pending, lr_link )
+    {
+        if ( iter->priority < priority )
+        {
+            list_add_tail(&n->lr_link, &iter->lr_link);
+            spin_unlock(&gic.lock);
+            return;
+        }
+    }
+    list_add(&n->lr_link, &gic.lr_pending);
+    spin_unlock(&gic.lock);
+    return;
+}
+
 void gic_inject_irq_start(void)
 {
     uint32_t hcr;
@@ -435,13 +475,25 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
     uint32_t lr;
     uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
 
-    for ( i = 0; i < 64; i++ ) {
+    for ( i = 0; i < nr_lrs; i++ ) {
         if ( eisr & ((uint64_t)1 << i) ) {
             struct pending_irq *p;
 
+            spin_lock(&gic.lock);
             lr = GICH[GICH_LR + i];
             virq = lr & GICH_LR_VIRTUAL_MASK;
             GICH[GICH_LR + i] = 0;
+            clear_bit(i, &gic.lr_mask);
+
+            if ( !list_empty(gic.lr_pending.next) ) {
+                p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
+                gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
+                list_del_init(&p->lr_link);
+                set_bit(i, &gic.lr_mask);
+            } else {
+                gic_inject_irq_stop();
+            }
+            spin_unlock(&gic.lock);
 
             spin_lock(&current->arch.vgic.lock);
             p = irq_to_pending(current, virq);
@@ -449,7 +501,6 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
                 p->desc->status &= ~IRQ_INPROGRESS;
                 GICC[GICC_DIR] = virq;
             }
-            gic_inject_irq_stop();
             list_del(&p->link);
             INIT_LIST_HEAD(&p->link);
             cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 3372d14..75095ff 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -21,6 +21,7 @@ struct pending_irq
     struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
     uint8_t priority;
     struct list_head link;
+    struct list_head lr_link;
 };
 
 struct arch_domain
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH] arm: replace list_del and INIT_LIST_HEAD with list_del_init
  2012-02-15 14:03 [PATCH v3] arm: support fewer LR registers than virtual irqs Stefano Stabellini
@ 2012-02-15 14:03 ` Stefano Stabellini
  2012-02-17 12:50   ` Ian Campbell
  2012-02-17 12:46 ` [PATCH v3] arm: support fewer LR registers than virtual irqs Ian Campbell
  1 sibling, 1 reply; 8+ messages in thread
From: Stefano Stabellini @ 2012-02-15 14:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Stefano Stabellini, Ian.Campbell, david.vrabel

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
 xen/arch/arm/gic.c |    3 +--
 1 files changed, 1 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 520a400..c34661b 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -501,8 +501,7 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
                 p->desc->status &= ~IRQ_INPROGRESS;
                 GICC[GICC_DIR] = virq;
             }
-            list_del(&p->link);
-            INIT_LIST_HEAD(&p->link);
+            list_del_init(&p->link);
             cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
             spin_unlock(&current->arch.vgic.lock);
         }
-- 
1.7.2.5

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] arm: support fewer LR registers than virtual irqs
  2012-02-15 14:03 [PATCH v3] arm: support fewer LR registers than virtual irqs Stefano Stabellini
  2012-02-15 14:03 ` [PATCH] arm: replace list_del and INIT_LIST_HEAD with list_del_init Stefano Stabellini
@ 2012-02-17 12:46 ` Ian Campbell
  2012-02-23 15:45   ` Stefano Stabellini
  1 sibling, 1 reply; 8+ messages in thread
From: Ian Campbell @ 2012-02-17 12:46 UTC (permalink / raw)
  To: Stefano Stabellini; +Cc: xen-devel, David Vrabel

On Wed, 2012-02-15 at 14:03 +0000, Stefano Stabellini wrote:
> If the vgic needs to inject a virtual irq into the guest, but no free
> LR registers are available, add the irq to a list and return.
> Whenever an LR register becomes available we add the queued irq to it
> and remove it from the list.
> We use the gic lock to protect the list and the bitmask.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> ---
>  xen/arch/arm/gic.c           |   61 ++++++++++++++++++++++++++++++++++++++---
>  xen/include/asm-arm/domain.h |    1 +
>  2 files changed, 57 insertions(+), 5 deletions(-)
> 
> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> index adc10bb..520a400 100644
> --- a/xen/arch/arm/gic.c
> +++ b/xen/arch/arm/gic.c
> @@ -25,6 +25,7 @@
>  #include <xen/sched.h>
>  #include <xen/errno.h>
>  #include <xen/softirq.h>
> +#include <xen/list.h>
>  #include <asm/p2m.h>
>  #include <asm/domain.h>
>  
> @@ -45,6 +46,8 @@ static struct {
>      unsigned int lines;
>      unsigned int cpus;
>      spinlock_t lock;
> +    uint64_t lr_mask;

Is there somewhere in the code which clamps the number of LRs reported
by the h/w to 64? (and warns appropriately)

This is a mask of in-use LRs, right?

> +    struct list_head lr_pending;
>  } gic;
>  
>  irq_desc_t irq_desc[NR_IRQS];
> @@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)
>  
>      GICH[GICH_HCR] = GICH_HCR_EN;
>      GICH[GICH_MISR] = GICH_MISR_EOI;
> +    gic.lr_mask = 0ULL;
> +    INIT_LIST_HEAD(&gic.lr_pending);
>  }
>  
>  /* Set up the GIC */
> @@ -345,16 +350,51 @@ int __init setup_irq(unsigned int irq, struct irqaction *new)
>      return rc;
>  }
>  
> -void gic_set_guest_irq(unsigned int virtual_irq,
> +static inline void gic_set_lr(int lr, unsigned int virtual_irq,
>          unsigned int state, unsigned int priority)
>  {
> -    BUG_ON(virtual_irq > nr_lrs);
> -    GICH[GICH_LR + virtual_irq] = state |
> +    BUG_ON(lr > nr_lrs);
> +    GICH[GICH_LR + lr] = state |
>          GICH_LR_MAINTENANCE_IRQ |
>          ((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
>          ((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
>  }
>  
> +void gic_set_guest_irq(unsigned int virtual_irq,
> +        unsigned int state, unsigned int priority)
> +{
> +    int i;
> +    struct pending_irq *iter, *n;
> +
> +    spin_lock(&gic.lock);
> +
> +    if ( list_empty(&gic.lr_pending) )
> +    {
> +        for (i = 0; i < nr_lrs; i++) {

One of the bitops.h functions should be usable to avoid this loop. We don't
seem to have a find-and-set type operation so you still need the lock.

Do we have any per-LR state which we keep? If we did then it would be worth
chaining them into a free list, which you could cheaply enqueue and
dequeue entries from.

> +            if (!test_and_set_bit(i, &gic.lr_mask))

test_and_set_bit is atomic which is unnecessary since you are also
protecting this field with a lock, or if you use the find_first_zero
idea you could just use __set_bit.

> +            {
> +                gic_set_lr(i, virtual_irq, state, priority);
> +                spin_unlock(&gic.lock);
> +                return;
> +            }
> +        }
> +    }
> +
> +    n = irq_to_pending(current, virtual_irq);
> +    list_for_each_entry ( iter, &gic.lr_pending, lr_link )
> +    {
> +        if ( iter->priority < priority )
> +        {
> +            list_add_tail(&n->lr_link, &iter->lr_link);
> +            spin_unlock(&gic.lock);
> +            return;
> +        }
> +    }
> +    list_add(&n->lr_link, &gic.lr_pending);
> +    spin_unlock(&gic.lock);

I'm not sure I follow the logic here -- it seems that only one IRQ will
ever be pending in the LRs at a time, but if we've got 4 LRs wouldn't we
want to have 4 active at once and only trap every 4 Acks?

If there's only going to be one active LR then we can just always use LR0
and do away with the bitmap for the time being.

Are interrupts only on lr_pending if they are not active in an LR? Are
pending irqs which are in an lr kept in a list somewhere else?

Also it would be nice to have a single exit and unlock path.

> +    return;
> +}
> +
>  void gic_inject_irq_start(void)
>  {
>      uint32_t hcr;
> @@ -435,13 +475,25 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
>      uint32_t lr;
>      uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
>  
> -    for ( i = 0; i < 64; i++ ) {
> +    for ( i = 0; i < nr_lrs; i++ ) {
>          if ( eisr & ((uint64_t)1 << i) ) {

This loop and test could also use a call to whichever bitops.h thing is
appropriate.

Maybe doesn't matter for loops of the order of 64 iterations but would
be a nice cleanup to make.

>              struct pending_irq *p;
>  
> +            spin_lock(&gic.lock);
>              lr = GICH[GICH_LR + i];
>              virq = lr & GICH_LR_VIRTUAL_MASK;
>              GICH[GICH_LR + i] = 0;
> +            clear_bit(i, &gic.lr_mask);
> +
> +            if ( !list_empty(gic.lr_pending.next) ) {
> +                p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
> +                gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
> +                list_del_init(&p->lr_link);
> +                set_bit(i, &gic.lr_mask);
> +            } else {
> +                gic_inject_irq_stop();
> +            }
> +            spin_unlock(&gic.lock);
>  
>              spin_lock(&current->arch.vgic.lock);
>              p = irq_to_pending(current, virq);
> @@ -449,7 +501,6 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
>                  p->desc->status &= ~IRQ_INPROGRESS;
>                  GICC[GICC_DIR] = virq;
>              }
> -            gic_inject_irq_stop();
>              list_del(&p->link);
>              INIT_LIST_HEAD(&p->link);
>              cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 3372d14..75095ff 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -21,6 +21,7 @@ struct pending_irq
>      struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
>      uint8_t priority;
>      struct list_head link;
> +    struct list_head lr_link;
>  };
>  
>  struct arch_domain

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] arm: replace list_del and INIT_LIST_HEAD with list_del_init
  2012-02-15 14:03 ` [PATCH] arm: replace list_del and INIT_LIST_HEAD with list_del_init Stefano Stabellini
@ 2012-02-17 12:50   ` Ian Campbell
  2012-02-23 14:55     ` Stefano Stabellini
  0 siblings, 1 reply; 8+ messages in thread
From: Ian Campbell @ 2012-02-17 12:50 UTC (permalink / raw)
  To: Stefano Stabellini; +Cc: xen-devel, David Vrabel

On Wed, 2012-02-15 at 14:03 +0000, Stefano Stabellini wrote:
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

Is this safe to take without the other vgic patch?

> ---
>  xen/arch/arm/gic.c |    3 +--
>  1 files changed, 1 insertions(+), 2 deletions(-)
> 
> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> index 520a400..c34661b 100644
> --- a/xen/arch/arm/gic.c
> +++ b/xen/arch/arm/gic.c
> @@ -501,8 +501,7 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
>                  p->desc->status &= ~IRQ_INPROGRESS;
>                  GICC[GICC_DIR] = virq;
>              }
> -            list_del(&p->link);
> -            INIT_LIST_HEAD(&p->link);
> +            list_del_init(&p->link);

David said:
        
        I don't think you need the INIT_LIST_HEAD() here (and even if
        you did you should use list_del_init()).  You only need to init
        nodes if you need to test if they are in a list or not.
        
But I'm not seeing where we test if a node is in a list or not, have I
missed it?

>              cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
>              spin_unlock(&current->arch.vgic.lock);
>          }

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] arm: replace list_del and INIT_LIST_HEAD with list_del_init
  2012-02-17 12:50   ` Ian Campbell
@ 2012-02-23 14:55     ` Stefano Stabellini
  0 siblings, 0 replies; 8+ messages in thread
From: Stefano Stabellini @ 2012-02-23 14:55 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, David Vrabel, Stefano Stabellini

On Fri, 17 Feb 2012, Ian Campbell wrote:
> On Wed, 2012-02-15 at 14:03 +0000, Stefano Stabellini wrote:
> > Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> 
> Is this safe to take without the other vgic patch?

yep


> > ---
> >  xen/arch/arm/gic.c |    3 +--
> >  1 files changed, 1 insertions(+), 2 deletions(-)
> > 
> > diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> > index 520a400..c34661b 100644
> > --- a/xen/arch/arm/gic.c
> > +++ b/xen/arch/arm/gic.c
> > @@ -501,8 +501,7 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
> >                  p->desc->status &= ~IRQ_INPROGRESS;
> >                  GICC[GICC_DIR] = virq;
> >              }
> > -            list_del(&p->link);
> > -            INIT_LIST_HEAD(&p->link);
> > +            list_del_init(&p->link);
> 
> David said:
>         
>         I don't think you need the INIT_LIST_HEAD() here (and even if
>         you did you should use list_del_init()).  You only need to init
>         nodes if you need to test if they are in a list or not.
>         
> But I'm not seeing where we test if a node is in a list or not, have I
> missed it?

the test is in xen/arch/arm/vgic.c:vgic_vcpu_inject_irq

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] arm: support fewer LR registers than virtual irqs
  2012-02-17 12:46 ` [PATCH v3] arm: support fewer LR registers than virtual irqs Ian Campbell
@ 2012-02-23 15:45   ` Stefano Stabellini
  2012-02-27 17:22     ` Ian Campbell
  0 siblings, 1 reply; 8+ messages in thread
From: Stefano Stabellini @ 2012-02-23 15:45 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, David Vrabel, Stefano Stabellini

On Fri, 17 Feb 2012, Ian Campbell wrote:
> On Wed, 2012-02-15 at 14:03 +0000, Stefano Stabellini wrote:
> > If the vgic needs to inject a virtual irq into the guest, but no free
> > LR registers are available, add the irq to a list and return.
> > Whenever an LR register becomes available we add the queued irq to it
> > and remove it from the list.
> > We use the gic lock to protect the list and the bitmask.
> > 
> > Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> > ---
> >  xen/arch/arm/gic.c           |   61 ++++++++++++++++++++++++++++++++++++++---
> >  xen/include/asm-arm/domain.h |    1 +
> >  2 files changed, 57 insertions(+), 5 deletions(-)
> > 
> > diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> > index adc10bb..520a400 100644
> > --- a/xen/arch/arm/gic.c
> > +++ b/xen/arch/arm/gic.c
> > @@ -25,6 +25,7 @@
> >  #include <xen/sched.h>
> >  #include <xen/errno.h>
> >  #include <xen/softirq.h>
> > +#include <xen/list.h>
> >  #include <asm/p2m.h>
> >  #include <asm/domain.h>
> >  
> > @@ -45,6 +46,8 @@ static struct {
> >      unsigned int lines;
> >      unsigned int cpus;
> >      spinlock_t lock;
> > +    uint64_t lr_mask;
> 
> Is there somewhere in the code which clamps the number of LRs reported
> by the h/w to 64? (and warns appropriately)

The hardware spec :)
64 is the maximum number of LR registers.


> This is a mask of in-use LRs, right?

Yes


> > +    struct list_head lr_pending;
> >  } gic;
> >  
> >  irq_desc_t irq_desc[NR_IRQS];
> > @@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)
> >  
> >      GICH[GICH_HCR] = GICH_HCR_EN;
> >      GICH[GICH_MISR] = GICH_MISR_EOI;
> > +    gic.lr_mask = 0ULL;
> > +    INIT_LIST_HEAD(&gic.lr_pending);
> >  }
> >  
> >  /* Set up the GIC */
> > @@ -345,16 +350,51 @@ int __init setup_irq(unsigned int irq, struct irqaction *new)
> >      return rc;
> >  }
> >  
> > -void gic_set_guest_irq(unsigned int virtual_irq,
> > +static inline void gic_set_lr(int lr, unsigned int virtual_irq,
> >          unsigned int state, unsigned int priority)
> >  {
> > -    BUG_ON(virtual_irq > nr_lrs);
> > -    GICH[GICH_LR + virtual_irq] = state |
> > +    BUG_ON(lr > nr_lrs);
> > +    GICH[GICH_LR + lr] = state |
> >          GICH_LR_MAINTENANCE_IRQ |
> >          ((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
> >          ((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
> >  }
> >  
> > +void gic_set_guest_irq(unsigned int virtual_irq,
> > +        unsigned int state, unsigned int priority)
> > +{
> > +    int i;
> > +    struct pending_irq *iter, *n;
> > +
> > +    spin_lock(&gic.lock);
> > +
> > +    if ( list_empty(&gic.lr_pending) )
> > +    {
> > +        for (i = 0; i < nr_lrs; i++) {
> 
> One of the bitops.h functions should be usable to avoid this loop. We don't
> seem to have a find-and-set type operation so you still need the lock.
> 
> Do we have any per-LR state which we keep? If we did then it would be worth
> chaining them into a free list, which you could cheaply enqueue and
> dequeue entries from.
> 
> > +            if (!test_and_set_bit(i, &gic.lr_mask))
> 
> test_and_set_bit is atomic which is unnecessary since you are also
> protecting this field with a lock, or if you use the find_first_zero
> idea you could just use __set_bit.

good idea


> > +            {
> > +                gic_set_lr(i, virtual_irq, state, priority);
> > +                spin_unlock(&gic.lock);
> > +                return;
> > +            }
> > +        }
> > +    }
> > +
> > +    n = irq_to_pending(current, virtual_irq);
> > +    list_for_each_entry ( iter, &gic.lr_pending, lr_link )
> > +    {
> > +        if ( iter->priority < priority )
> > +        {
> > +            list_add_tail(&n->lr_link, &iter->lr_link);
> > +            spin_unlock(&gic.lock);
> > +            return;
> > +        }
> > +    }
> > +    list_add(&n->lr_link, &gic.lr_pending);
> > +    spin_unlock(&gic.lock);
> 
> I'm not sure I follow the logic here -- it seems that only one IRQ will
> ever be pending in the LRs at a time, but if we've got 4 LRs wouldn't we
> want to have 4 active at once and only trap every 4 Acks?
> 
> If there's only going to be one active LR then we can just always use LR0
> and do away with the bitmap for the time being.

We have two lists: one list, inflight_irqs, is kept by the vgic to keep
track of which irqs the vgic has injected and have not been eoi'd by the
guest yet.

The second list, gic.lr_pending, is a list of irqs that from the vgic
POV have been already injected (they have been added to
inflight_irqs already) but because of hardware limitations
(limited availability of LR registers) the gic hasn't been able to
inject them yet.

Here we are going through the second list, gic.lr_pending, that is
ordered by priority, and we are inserting this last guest irq in the
right spot so that as soon as an LR register is available we can
actually inject it into the guest.

The vgic is not actually aware that the gic hasn't managed to inject the
irq yet.



> Are interrupts only on lr_pending if they are not active in an LR?

Yes


> Are
> pending irqs which are in an lr kept in a list somewhere else?

They are in inflight_irqs until eoi'd by the guest.


> Also it would be nice to have a single exit and unlock path.

OK


> > +    return;
> > +}
> > +
> >  void gic_inject_irq_start(void)
> >  {
> >      uint32_t hcr;
> > @@ -435,13 +475,25 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
> >      uint32_t lr;
> >      uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
> >  
> > -    for ( i = 0; i < 64; i++ ) {
> > +    for ( i = 0; i < nr_lrs; i++ ) {
> >          if ( eisr & ((uint64_t)1 << i) ) {
> 
> This loop and test could also use a call to whichever bitops.h thing is
> appropriate.
> 
> Maybe doesn't matter for loops of the order of 64 iterations but would
> be a nice cleanup to make.

OK.

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] arm: support fewer LR registers than virtual irqs
  2012-02-23 15:45   ` Stefano Stabellini
@ 2012-02-27 17:22     ` Ian Campbell
  2012-02-29 17:18       ` Stefano Stabellini
  0 siblings, 1 reply; 8+ messages in thread
From: Ian Campbell @ 2012-02-27 17:22 UTC (permalink / raw)
  To: Stefano Stabellini; +Cc: xen-devel, David Vrabel

On Thu, 2012-02-23 at 15:45 +0000, Stefano Stabellini wrote:
> On Fri, 17 Feb 2012, Ian Campbell wrote:
> > If there's only going to be one active LR then we can jut always use LR0
> > and do away with the bitmap for the time being.
> 
> We have two lists: one list, inflight_irqs, is kept by the vgic to keep
> track of which irqs the vgic has injected and have not been eoi'd by the
> guest yet.

This means "currently live in an LR" ?

> The second list, gic.lr_pending, is a list of irqs that from the vgic
> POV have been already injected (they have been added to
> inflight_irqs already) but because of hardware limitations
> (limited availability of LR registers) the gic hasn't been able to
> inject them yet.

This means "would be in an LR if there was space" ?

Is lr_pending list a superset of the inflight_irqs ones? Or are they
always disjoint? If they are disjoint then doesn't a single list next
pointer in struct pending_irq suffice?

It would be really nice to have a comment somewhere which describes the
purpose of these lists and what the invariants are for an entry on them.

> Here we are going through the second list, gic.lr_pending, that is
> ordered by priority, and we are inserting this last guest irq in the
> right spot so that as soon as an LR register is available we can
> actually inject it into the guest.
> 
> The vgic is not actually aware that the gic hasn't managed to inject the
> irq yet.

I was just looking at applying v4 and it has:

> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 3372d14..75095ff 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -21,6 +21,7 @@ struct pending_irq
>      struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
>      uint8_t priority;
>      struct list_head link;
> +    struct list_head lr_link;

I think two list fields named link and lr_link need something to
describe and/or distinguish them, their purpose etc.

The use of the name "pending" as the field in the struct is a little
confusing, because there are multiple ways in which an interrupt can be
pending, both in and out of an LR.

Ian.

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] arm: support fewer LR registers than virtual irqs
  2012-02-27 17:22     ` Ian Campbell
@ 2012-02-29 17:18       ` Stefano Stabellini
  0 siblings, 0 replies; 8+ messages in thread
From: Stefano Stabellini @ 2012-02-29 17:18 UTC (permalink / raw)
  To: Ian Campbell; +Cc: xen-devel, David Vrabel, Stefano Stabellini

On Mon, 27 Feb 2012, Ian Campbell wrote:
> On Thu, 2012-02-23 at 15:45 +0000, Stefano Stabellini wrote:
> > On Fri, 17 Feb 2012, Ian Campbell wrote:
> > > If there's only going to be one active LR then we can just always use LR0
> > > and do away with the bitmap for the time being.
> > 
> > We have two lists: one list, inflight_irqs, is kept by the vgic to keep
> > track of which irqs the vgic has injected and have not been eoi'd by the
> > guest yet.
> 
> This means "currently live in an LR" ?

nope, that means "I want to be injected in the guest"


> > The second list, gic.lr_pending, is a list of irqs that from the vgic
> > POV have been already injected (they have been added to
> > inflight_irqs already) but because of hardware limitations
> > (limited availability of LR registers) the gic hasn't been able to
> > inject them yet.
> 
> This means "would be in an LR if there was space" ?

yes


> Is lr_pending list a superset of the inflight_irqs ones? Or are they
> always disjoint? If they are disjoint then doesn't a single list next
> pointer in struct pending_irq suffice?

inflight_irqs is a superset of lr_pending


> It would be really nice to have a comment somewhere which describes the
> purpose of these lists and what the invariants are for an entry on them.

yeah


> > Here we are going through the second list, gic.lr_pending, that is
> > ordered by priority, and we are inserting this last guest irq in the
> > right spot so that as soon as an LR register is available we can
> > actually inject it into the guest.
> > 
> > The vgic is not actually aware that the gic hasn't managed to inject the
> > irq yet.
> 
> I was just looking at applying v4 and it has:
> 
> > diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> > index 3372d14..75095ff 100644
> > --- a/xen/include/asm-arm/domain.h
> > +++ b/xen/include/asm-arm/domain.h
> > @@ -21,6 +21,7 @@ struct pending_irq
> >      struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
> >      uint8_t priority;
> >      struct list_head link;
> > +    struct list_head lr_link;
> 
> I think two list fields named link and lr_link need something to
> describe and/or distinguish them, their purpose etc.
> 
> The use of the name "pending" as the field in the struct is a little
> confusing, because there are multiple ways in which an interrupt can be
> pending, both in and out of an LR.
 
yes, they deserve at least a comment

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2012-02-29 17:18 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-02-15 14:03 [PATCH v3] arm: support fewer LR registers than virtual irqs Stefano Stabellini
2012-02-15 14:03 ` [PATCH] arm: replace list_del and INIT_LIST_HEAD with list_del_init Stefano Stabellini
2012-02-17 12:50   ` Ian Campbell
2012-02-23 14:55     ` Stefano Stabellini
2012-02-17 12:46 ` [PATCH v3] arm: support fewer LR registers than virtual irqs Ian Campbell
2012-02-23 15:45   ` Stefano Stabellini
2012-02-27 17:22     ` Ian Campbell
2012-02-29 17:18       ` Stefano Stabellini

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.