From mboxrd@z Thu Jan 1 00:00:00 1970
From: vijay.kilari@gmail.com
Subject: [PATCH v2 05/15] xen/arm: segregate GIC low level functionality
Date: Fri, 4 Apr 2014 17:26:23 +0530
Message-ID: <1396612593-443-6-git-send-email-vijay.kilari@gmail.com>
References: <1396612593-443-1-git-send-email-vijay.kilari@gmail.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
In-Reply-To: <1396612593-443-1-git-send-email-vijay.kilari@gmail.com>
Sender: xen-devel-bounces@lists.xen.org
Errors-To: xen-devel-bounces@lists.xen.org
To: Ian.Campbell@citrix.com, julien.grall@linaro.org,
 stefano.stabellini@eu.citrix.com, stefano.stabellini@citrix.com,
 xen-devel@lists.xen.org
Cc: Prasun.Kapoor@caviumnetworks.com, Vijaya Kumar K, vijay.kilari@gmail.com
List-Id: xen-devel@lists.xenproject.org

From: Vijaya Kumar K

GIC low level functionality is segregated into separate functions,
which are called through registered callbacks wherever required.
This helps to separate generic and hardware-specific functionality
later.

Signed-off-by: Vijaya Kumar K
---
 xen/arch/arm/gic.c                |  362 ++++++++++++++++++++++++++++---------
 xen/include/asm-arm/gic.h         |   50 +++++
 xen/include/asm-arm/gic_v2_defs.h |   16 +-
 3 files changed, 328 insertions(+), 100 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 64699e4..9f03135 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -57,8 +57,21 @@ static irq_desc_t irq_desc[NR_IRQS];
 static DEFINE_PER_CPU(irq_desc_t[NR_LOCAL_IRQS], local_irq_desc);
 static DEFINE_PER_CPU(uint64_t, lr_mask);
 
+static struct gic_hw_operations *gic_hw_ops;
+static struct gic_hw_operations gic_ops;
+
+void register_gic_ops(struct gic_hw_operations *ops)
+{
+    gic_hw_ops = ops;
+}
+
+void update_cpu_lr_mask(void)
+{
+    this_cpu(lr_mask) = 0ULL;
+}
+
 static uint8_t nr_lrs;
-#define lr_all_full() (this_cpu(lr_mask) == ((1 << nr_lrs) - 1))
+#define lr_all_full() (this_cpu(lr_mask) == ((1 << gic_hw_ops->nr_lrs()) - 1))
 
 /* The GIC mapping of CPU interfaces does not necessarily match the
  * logical CPU numbering.  Let's use mapping as returned by the GIC
@@ -89,48 +102,124 @@ static unsigned int gic_cpu_mask(const cpumask_t *cpumask)
 
 unsigned int gic_number_lines(void)
 {
+    return gic_hw_ops->nr_lines();
+}
+
+static unsigned int gic_nr_lines(void)
+{
     return gic.lines;
 }
 
-irq_desc_t *__irq_to_desc(int irq)
+static unsigned int gic_nr_lrs(void)
 {
-    if (irq < NR_LOCAL_IRQS) return &this_cpu(local_irq_desc)[irq];
-    return &irq_desc[irq-NR_LOCAL_IRQS];
+    return nr_lrs;
 }
 
-void gic_save_state(struct vcpu *v)
+static int gic_state_init(struct vcpu *v)
+{
+    v->arch.gic_state = xzalloc(struct gic_state_data);
+    if(!v->arch.gic_state)
+        return -ENOMEM;
+    return 0;
+}
+
+static void save_state(struct vcpu *v)
 {
     int i;
-    ASSERT(!local_irq_is_enabled());
 
     /* No need for spinlocks here because interrupts are disabled around
      * this call and it only accesses struct vcpu fields that cannot be
      * accessed simultaneously by another pCPU.
      */
-    for ( i = 0; i < nr_lrs; i++ )
+    for ( i = 0; i < nr_lrs; i++)
         v->arch.gic_state->gic_lr[i] = GICH[GICH_LR + i];
-    v->arch.lr_mask = this_cpu(lr_mask);
     v->arch.gic_state->gic_apr = GICH[GICH_APR];
     v->arch.gic_state->gic_vmcr = GICH[GICH_VMCR];
     /* Disable until next VCPU scheduled */
     GICH[GICH_HCR] = 0;
+}
+
+static void restore_state(struct vcpu *v)
+{
+    int i;
+
+    for ( i = 0; i < nr_lrs; i++ )
+        GICH[GICH_LR + i] = v->arch.gic_state->gic_lr[i];
+    GICH[GICH_APR] = v->arch.gic_state->gic_apr;
+    GICH[GICH_VMCR] = v->arch.gic_state->gic_vmcr;
+    GICH[GICH_HCR] = GICH_HCR_EN;
+}
+
+static void gic_dump_state(struct vcpu *v)
+{
+    int i;
+    if ( v == current )
+    {
+        for ( i = 0; i < nr_lrs; i++ )
+            printk("   HW_LR[%d]=%x\n", i, GICH[GICH_LR + i]);
+    } else {
+        for ( i = 0; i < nr_lrs; i++ )
+            printk("   VCPU_LR[%d]=%x\n", i, v->arch.gic_state->gic_lr[i]);
+    }
+}
+
+static void gic_enable_irq(int irq)
+{
+    /* Enable routing */
+    GICD[GICD_ISENABLER + irq / 32] = (1u << (irq % 32));
+}
+
+static void gic_disable_irq(int irq)
+{
+    /* Disable routing */
+    GICD[GICD_ICENABLER + irq / 32] = (1u << (irq % 32));
+}
+
+static void gic_eoi_irq(int irq)
+{
+    /* Lower the priority */
+    GICC[GICC_EOIR] = irq;
+}
+
+static void gic_dir_irq(int irq)
+{
+    /* Deactivate */
+    GICC[GICC_DIR] = irq;
+}
+
+static unsigned int gic_ack_irq(void)
+{
+    return (GICC[GICC_IAR] & GICC_IA_IRQ);
+}
+
+irq_desc_t *__irq_to_desc(int irq)
+{
+    if (irq < NR_LOCAL_IRQS) return &this_cpu(local_irq_desc)[irq];
+    return &irq_desc[irq-NR_LOCAL_IRQS];
+}
+
+void gic_save_state(struct vcpu *v)
+{
+    ASSERT(!local_irq_is_enabled());
+
+    /* No need for spinlocks here because interrupts are disabled around
+     * this call and it only accesses struct vcpu fields that cannot be
+     * accessed simultaneously by another pCPU.
+     */
+    v->arch.lr_mask = this_cpu(lr_mask);
+    gic_hw_ops->save_state(v);
 
     isb();
 }
 
 void gic_restore_state(struct vcpu *v)
 {
-    int i;
     ASSERT(!local_irq_is_enabled());
 
     if ( is_idle_vcpu(v) )
         return;
 
     this_cpu(lr_mask) = v->arch.lr_mask;
-    for ( i = 0; i < nr_lrs; i++ )
-        GICH[GICH_LR + i] = v->arch.gic_state->gic_lr[i];
-    GICH[GICH_APR] = v->arch.gic_state->gic_apr;
-    GICH[GICH_VMCR] = v->arch.gic_state->gic_vmcr;
-    GICH[GICH_HCR] = GICH_HCR_EN;
+    gic_hw_ops->restore_state(v);
 
     isb();
 
     gic_restore_pending_irqs(v);
@@ -146,7 +235,7 @@ static void gic_irq_enable(struct irq_desc *desc)
     desc->status &= ~IRQ_DISABLED;
     dsb(sy);
     /* Enable routing */
-    GICD[GICD_ISENABLER + irq / 32] = (1u << (irq % 32));
+    gic_hw_ops->enable_irq(irq);
     spin_unlock(&gic.lock);
     spin_unlock_irqrestore(&desc->lock, flags);
 }
@@ -159,7 +248,7 @@ static void gic_irq_disable(struct irq_desc *desc)
     spin_lock_irqsave(&desc->lock, flags);
     spin_lock(&gic.lock);
     /* Disable routing */
-    GICD[GICD_ICENABLER + irq / 32] = (1u << (irq % 32));
+    gic_hw_ops->disable_irq(irq);
     desc->status |= IRQ_DISABLED;
     spin_unlock(&gic.lock);
     spin_unlock_irqrestore(&desc->lock, flags);
@@ -185,16 +274,16 @@ static void gic_host_irq_end(struct irq_desc *desc)
 {
     int irq = desc->irq;
     /* Lower the priority */
-    GICC[GICC_EOIR] = irq;
+    gic_hw_ops->eoi_irq(irq);
     /* Deactivate */
-    GICC[GICC_DIR] = irq;
+    gic_hw_ops->deactivate_irq(irq);
 }
 
 static void gic_guest_irq_end(struct irq_desc *desc)
 {
     int irq = desc->irq;
     /* Lower the priority of the IRQ */
-    GICC[GICC_EOIR] = irq;
+    gic_hw_ops->eoi_irq(irq);
     /* Deactivation happens in maintenance interrupt / via GICV */
 }
 
@@ -230,7 +319,7 @@ static hw_irq_controller gic_guest_irq_type = {
  * - needs to be called with a valid cpu_mask, ie each cpu in the mask has
  *   already called gic_cpu_init
  */
-static void gic_set_irq_properties(unsigned int irq, bool_t level,
+static void gic_set_irq_property(unsigned int irq, bool_t level,
                                    const cpumask_t *cpu_mask,
                                    unsigned int priority)
 {
@@ -257,6 +346,13 @@ static void gic_set_irq_properties(unsigned int irq, bool_t level,
 
 }
 
+static void gic_set_irq_properties(unsigned int irq, bool_t level,
+                                   const cpumask_t *cpu_mask,
+                                   unsigned int priority)
+{
+    return gic_hw_ops->set_irq_property(irq, level, cpu_mask, priority);
+}
+
 /* Program the GIC to route an interrupt */
 static int gic_route_irq(unsigned int irq, bool_t level,
                          const cpumask_t *cpu_mask, unsigned int priority)
@@ -377,7 +473,7 @@ static void __cpuinit gic_hyp_init(void)
 
     nr_lrs = (vtr & GICH_VTR_NRLRGS) + 1;
     GICH[GICH_MISR] = GICH_MISR_EOI;
-    this_cpu(lr_mask) = 0ULL;
+    update_cpu_lr_mask();
 }
 
 static void __cpuinit gic_hyp_disable(void)
@@ -478,10 +574,22 @@ void __init gic_init(void)
     gic_cpu_init();
     gic_hyp_init();
 
+    register_gic_ops(&gic_ops);
     spin_unlock(&gic.lock);
 }
 
-void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi)
+static void gic_secondary_cpu_init(void)
+{
+    gic_cpu_init();
+    gic_hyp_init();
+}
+
+static struct dt_irq * gic_maintenance_irq(void)
+{
+    return &gic.maintenance;
+}
+
+static void gic_send_sgi(const cpumask_t *cpumask, enum gic_sgi sgi)
 {
     unsigned int mask = 0;
     cpumask_t online_mask;
@@ -498,30 +606,26 @@ void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi)
                       | sgi;
 }
 
-void send_SGI_one(unsigned int cpu, enum gic_sgi sgi)
+void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi)
 {
-    ASSERT(cpu < NR_GIC_CPU_IF);  /* Targets bitmap only supports 8 CPUs */
-    send_SGI_mask(cpumask_of(cpu), sgi);
+    gic_hw_ops->send_sgi(cpumask, sgi);
 }
 
-void send_SGI_self(enum gic_sgi sgi)
+void send_SGI_one(unsigned int cpu, enum gic_sgi sgi)
 {
-    ASSERT(sgi < 16); /* There are only 16 SGIs */
-
-    dsb(sy);
-
-    GICD[GICD_SGIR] = GICD_SGI_TARGET_SELF
-        | sgi;
+    ASSERT(cpu < NR_GIC_CPU_IF);  /* Targets bitmap only supports 8 CPUs */
+    send_SGI_mask(cpumask_of(cpu), sgi);
 }
 
 void send_SGI_allbutself(enum gic_sgi sgi)
 {
-    ASSERT(sgi < 16); /* There are only 16 SGIs */
+    cpumask_t all_others_mask;
+    ASSERT(sgi < 16); /* There are only 16 SGIs */
 
-    dsb(sy);
+    cpumask_andnot(&all_others_mask, &cpu_possible_map, cpumask_of(smp_processor_id()));
 
-    GICD[GICD_SGIR] = GICD_SGI_TARGET_OTHERS
-        | sgi;
+    dsb(sy);
+    send_SGI_mask(&all_others_mask, sgi);
 }
 
 void smp_send_state_dump(unsigned int cpu)
@@ -533,26 +637,30 @@ void __cpuinit gic_init_secondary_cpu(void)
 {
     spin_lock(&gic.lock);
-    gic_cpu_init();
-    gic_hyp_init();
+    gic_hw_ops->secondary_init();
     spin_unlock(&gic.lock);
 }
 
 /* Shut down the per-CPU GIC interface */
+static void gic_disable_interface(void)
+{
+    gic_cpu_disable();
+    gic_hyp_disable();
+}
+
 void gic_disable_cpu(void)
 {
     ASSERT(!local_irq_is_enabled());
 
     spin_lock(&gic.lock);
-    gic_cpu_disable();
-    gic_hyp_disable();
+    gic_hw_ops->disable_interface();
     spin_unlock(&gic.lock);
 }
 
 void gic_route_ppis(void)
 {
     /* GIC maintenance */
-    gic_route_dt_irq(&gic.maintenance, cpumask_of(smp_processor_id()),
+    gic_route_dt_irq(gic_hw_ops->get_maintenance_irq(), cpumask_of(smp_processor_id()),
                      GIC_PRI_IRQ);
     /* Route timer interrupt */
     route_timer_interrupt();
@@ -627,23 +735,31 @@ int __init setup_dt_irq(const struct dt_irq *irq, struct irqaction *new)
     return rc;
 }
 
-static inline void gic_set_lr(int lr, struct pending_irq *p,
-        unsigned int state)
+static void gic_update_lr(int lr, struct pending_irq *p, unsigned int state)
 {
-    uint32_t lr_reg;
+    int maintenance_int = GICH_LR_MAINTENANCE_IRQ;
 
-    ASSERT(!local_irq_is_enabled());
     BUG_ON(lr >= nr_lrs);
     BUG_ON(lr < 0);
     BUG_ON(state & ~(GICH_LR_STATE_MASK<<GICH_LR_STATE_SHIFT));
 
-    lr_reg = state | GICH_LR_MAINTENANCE_IRQ |
-        ((p->priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
+    GICH[GICH_LR + lr] = ((state & 0x3) << GICH_LR_STATE_SHIFT) |
+        maintenance_int |
+        ((p->priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
         ((p->irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
 
-    if ( p->desc != NULL )
-        lr_reg |= GICH_LR_HW | (p->desc->irq << GICH_LR_PHYSICAL_SHIFT);
+}
 
-    GICH[GICH_LR + lr] = lr_reg;
+static void gic_clear_lr(int lr)
+{
+    GICH[GICH_LR + lr] = 0;
+}
+
+static inline void gic_set_lr(int lr, struct pending_irq *p,
+                              unsigned int state)
+{
+    ASSERT(!local_irq_is_enabled());
+    gic_hw_ops->update_lr(lr, p, state);
 
     set_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
     clear_bit(GIC_IRQ_GUEST_PENDING, &p->status);
     p->lr = lr;
@@ -685,6 +801,7 @@ void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq,
 {
     int i;
     struct pending_irq *n = irq_to_pending(v, virtual_irq);
+    unsigned int nr_lrs = gic_hw_ops->nr_lrs();
 
     ASSERT(spin_is_locked(&v->arch.vgic.lock));
 
@@ -711,29 +828,32 @@ void gic_raise_guest_irq(struct vcpu *v, unsigned int virtual_irq,
 static void gic_clear_one_lr(struct vcpu *v, int i)
 {
     struct pending_irq *p;
-    uint32_t lr;
     int irq;
+    struct gic_lr lr_val;
 
     ASSERT(!local_irq_is_enabled());
     ASSERT(spin_is_locked(&v->arch.vgic.lock));
     ASSERT(!local_irq_is_enabled());
 
-    lr = GICH[GICH_LR + i];
-    irq = (lr >> GICH_LR_VIRTUAL_SHIFT) & GICH_LR_VIRTUAL_MASK;
+    gic_hw_ops->read_lr(i, &lr_val);
+    irq = lr_val.virq;
     p = irq_to_pending(v, irq);
 
-    if ( lr & GICH_LR_ACTIVE )
+    if ( lr_val.state & GICH_LR_ACTIVE )
     {
         set_bit(GIC_IRQ_GUEST_ACTIVE, &p->status);
         /* HW interrupts cannot be ACTIVE and PENDING */
         if ( p->desc == NULL && test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
              test_and_clear_bit(GIC_IRQ_GUEST_PENDING, &p->status) )
-            GICH[GICH_LR + i] = lr | GICH_LR_PENDING;
-    } else if ( lr & GICH_LR_PENDING ) {
+        {
+            lr_val.state |= GICH_LR_PENDING;
+            gic_hw_ops->write_lr(i, &lr_val);
+        }
+    } else if ( lr_val.state & GICH_LR_PENDING ) {
         clear_bit(GIC_IRQ_GUEST_PENDING, &p->status);
     } else {
-        GICH[GICH_LR + i] = 0;
+        gic_hw_ops->clear_lr(i);
         clear_bit(i, &this_cpu(lr_mask));
 
         if ( p->desc != NULL )
@@ -754,6 +874,7 @@ void gic_clear_lrs(struct vcpu *v)
 {
     int i = 0;
     unsigned long flags;
+    unsigned int nr_lrs = gic_hw_ops->nr_lrs();
 
     spin_lock_irqsave(&v->arch.vgic.lock, flags);
 
@@ -768,10 +889,12 @@ void gic_clear_lrs(struct vcpu *v)
 
 static void gic_restore_pending_irqs(struct vcpu *v)
 {
-    int i = 0, lrs = nr_lrs;
+    int i = 0, lrs;
     struct pending_irq *p, *t, *p_r;
     unsigned long flags;
+    unsigned int nr_lrs = gic_hw_ops->nr_lrs();
 
+    lrs = nr_lrs;
     if ( list_empty(&v->arch.vgic.lr_pending) )
         return;
 
@@ -828,13 +951,15 @@ void gic_clear_pending_irqs(struct vcpu *v)
 
 int gic_events_need_delivery(void)
 {
-    int mask_priority, lrs = nr_lrs;
+    int mask_priority, lrs;
     int max_priority = 0xff, active_priority = 0xff;
     struct vcpu *v = current;
     struct pending_irq *p;
     unsigned long flags;
+    unsigned int nr_lrs = gic_hw_ops->nr_lrs();
 
+    lrs = nr_lrs;
-    mask_priority = (GICH[GICH_VMCR] >> GICH_VMCR_PRIORITY_SHIFT) & GICH_VMCR_PRIORITY_MASK;
+    mask_priority = gic_hw_ops->read_vmcr_priority();
 
     spin_lock_irqsave(&v->arch.vgic.lock, flags);
 
@@ -871,9 +996,9 @@ void gic_inject(void)
     gic_restore_pending_irqs(current);
 
     if ( !list_empty(&current->arch.vgic.lr_pending) && lr_all_full() )
-        GICH[GICH_HCR] |= GICH_HCR_UIE;
+        gic_hw_ops->update_hcr_status(GICH_HCR_UIE, 1);
     else
-        GICH[GICH_HCR] &= ~GICH_HCR_UIE;
+        gic_hw_ops->update_hcr_status(GICH_HCR_UIE, 0);
 }
 
 int gic_route_irq_to_guest(struct domain *d, const struct dt_irq *irq,
@@ -921,10 +1046,10 @@ out:
     return retval;
 }
 
-static void do_sgi(struct cpu_user_regs *regs, int othercpu, enum gic_sgi sgi)
+static void do_sgi(struct cpu_user_regs *regs, enum gic_sgi sgi)
 {
     /* Lower the priority */
-    GICC[GICC_EOIR] = sgi;
+    gic_hw_ops->eoi_irq(sgi);
 
     switch (sgi)
     {
@@ -943,19 +1068,16 @@ static void do_sgi(struct cpu_user_regs *regs, int othercpu, enum gic_sgi sgi)
     }
 
     /* Deactivate */
-    GICC[GICC_DIR] = sgi;
+    gic_hw_ops->deactivate_irq(sgi);
 }
 
 /* Accept an interrupt from the GIC and dispatch its handler */
 void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
 {
-    uint32_t intack;
     unsigned int irq;
-
     do  {
-        intack = GICC[GICC_IAR];
-        irq = intack & GICC_IA_IRQ;
+        irq = gic_hw_ops->ack_irq();
 
         if ( likely(irq >= 16 && irq < 1021) )
         {
@@ -965,8 +1087,7 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
         }
         else if (unlikely(irq < 16))
        {
-            unsigned int cpu = (intack & GICC_IA_CPU_MASK) >> GICC_IA_CPU_SHIFT;
-            do_sgi(regs, cpu, irq);
+            do_sgi(regs, irq);
         }
         else
         {
@@ -976,15 +1097,7 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
     } while (1);
 }
 
-int vcpu_gic_init(struct vcpu *v)
-{
-    v->arch.gic_state = xzalloc(struct gic_state_data);
-    if(!v->arch.gic_state)
-        return -ENOMEM;
-    return 0;
-}
-
-int gicv_setup(struct domain *d)
+static int gicv_init(struct domain *d)
 {
     int ret;
 
@@ -1031,6 +1144,85 @@ int gicv_setup(struct domain *d)
 
 }
 
+int vcpu_gic_init(struct vcpu *v)
+{
+    return gic_hw_ops->state_init(v);
+}
+
+int gicv_setup(struct domain *d)
+{
+    int ret;
+
+    ret = gic_hw_ops->gicv_setup(d);
+    return ret;
+
+}
+
+static void gic_read_lr(int lr, struct gic_lr *lr_reg)
+{
+    uint32_t lrv;
+
+    lrv = GICH[GICH_LR + lr];
+    lr_reg->pirq = (lrv >> GICH_LR_PHYSICAL_SHIFT) & GICH_LR_PHYSICAL_MASK;
+    lr_reg->virq = (lrv >> GICH_LR_VIRTUAL_SHIFT) & GICH_LR_VIRTUAL_MASK;
+    lr_reg->priority = (lrv >> GICH_LR_PRIORITY_SHIFT) & GICH_LR_PRIORITY_MASK;
+    lr_reg->state = (lrv >> GICH_LR_STATE_SHIFT) & GICH_LR_STATE_MASK;
+    lr_reg->hw_status = (lrv >> GICH_LR_HW_SHIFT) & GICH_LR_HW_MASK;
+    lr_reg->grp = (lrv >> GICH_LR_GRP_SHIFT) & GICH_LR_GRP_MASK;
+}
+
+static void gic_write_lr(int lr, struct gic_lr *lr_reg)
+{
+    uint32_t lrv = 0;
+    lrv = ( ((lr_reg->pirq & GICH_LR_PHYSICAL_MASK) << GICH_LR_PHYSICAL_SHIFT) |
+            ((lr_reg->virq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT) |
+            ((uint32_t)(lr_reg->priority & GICH_LR_PRIORITY_MASK) << GICH_LR_PRIORITY_SHIFT) |
+            ((uint32_t)(lr_reg->state & GICH_LR_STATE_MASK) << GICH_LR_STATE_SHIFT) |
+            ((uint32_t)(lr_reg->hw_status & GICH_LR_HW_MASK) << GICH_LR_HW_SHIFT) |
+            ((uint32_t)(lr_reg->grp & GICH_LR_GRP_MASK) << GICH_LR_GRP_SHIFT) );
+
+    GICH[GICH_LR + lr] = lrv;
+}
+
+static void gic_hcr_status(uint32_t flag, uint8_t status)
+{
+    if ( status )
+        GICH[GICH_HCR] |= flag;
+    else
+        GICH[GICH_HCR] &= ~flag;
+}
+
+static unsigned int gic_read_vmcr_priority(void)
+{
+    return (GICH[GICH_VMCR] >> GICH_VMCR_PRIORITY_SHIFT) & GICH_VMCR_PRIORITY_MASK;
+}
+
+static struct gic_hw_operations gic_ops = {
+    .nr_lines            = gic_nr_lines,
+    .nr_lrs              = gic_nr_lrs,
+    .secondary_init      = gic_secondary_cpu_init,
+    .get_maintenance_irq = gic_maintenance_irq,
+    .state_init          = gic_state_init,
+    .save_state          = save_state,
+    .restore_state       = restore_state,
+    .dump_state          = gic_dump_state,
+    .gicv_setup          = gicv_init,
+    .enable_irq          = gic_enable_irq,
+    .disable_irq         = gic_disable_irq,
+    .eoi_irq             = gic_eoi_irq,
+    .deactivate_irq      = gic_dir_irq,
+    .ack_irq             = gic_ack_irq,
+    .set_irq_property    = gic_set_irq_property,
+    .send_sgi            = gic_send_sgi,
+    .disable_interface   = gic_disable_interface,
+    .update_lr           = gic_update_lr,
+    .update_hcr_status   = gic_hcr_status,
+    .clear_lr            = gic_clear_lr,
+    .read_lr             = gic_read_lr,
+    .write_lr            = gic_write_lr,
+    .read_vmcr_priority  = gic_read_vmcr_priority,
+};
+
 static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
 {
     /*
@@ -1043,18 +1235,10 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
 
 void gic_dump_info(struct vcpu *v)
 {
-    int i;
     struct pending_irq *p;
 
     printk("GICH_LRs (vcpu %d) mask=%"PRIx64"\n", v->vcpu_id, v->arch.lr_mask);
-    if ( v == current )
-    {
-        for ( i = 0; i < nr_lrs; i++ )
-            printk("   HW_LR[%d]=%x\n", i, GICH[GICH_LR + i]);
-    } else {
-        for ( i = 0; i < nr_lrs; i++ )
-            printk("   VCPU_LR[%d]=%x\n", i, v->arch.gic_state->gic_lr[i]);
-    }
+    gic_hw_ops->dump_state(v);
 
     list_for_each_entry ( p, &v->arch.vgic.inflight_irqs, inflight )
     {
@@ -1070,7 +1254,7 @@ void gic_dump_info(struct vcpu *v)
 
 void __cpuinit init_maintenance_interrupt(void)
 {
-    request_dt_irq(&gic.maintenance, maintenance_interrupt,
+    request_dt_irq(gic_hw_ops->get_maintenance_irq(), maintenance_interrupt,
                    "irq-maintenance", NULL);
 }
 
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 5f49eb1..27d2792 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -39,6 +39,17 @@
 #define GIC_PRI_IPI        0x90 /* IPIs must preempt normal interrupts */
 #define GIC_PRI_HIGHEST    0x80 /* Higher priorities belong to Secure-World */
 
+#define GICH_LR_PENDING    1
+#define GICH_LR_ACTIVE     2
+
+#define GICH_HCR_EN        (1 << 0)
+#define GICH_HCR_UIE       (1 << 1)
+#define GICH_HCR_LRENPIE   (1 << 2)
+#define GICH_HCR_NPIE      (1 << 3)
+#define GICH_HCR_VGRP0EIE  (1 << 4)
+#define GICH_HCR_VGRP0DIE  (1 << 5)
+#define GICH_HCR_VGRP1EIE  (1 << 6)
+#define GICH_HCR_VGRP1DIE  (1 << 7)
 
 #ifndef __ASSEMBLY__
 #include
@@ -61,6 +72,15 @@ struct gic_state_data {
     uint32_t gic_lr[64];
 };
 
+struct gic_lr {
+    uint32_t pirq;
+    uint32_t virq;
+    uint8_t priority;
+    uint8_t state;
+    uint8_t hw_status;
+    uint8_t grp;
+};
+
 extern int domain_vgic_init(struct domain *d);
 extern void domain_vgic_free(struct domain *d);
 
@@ -129,6 +149,36 @@ int gic_irq_xlate(const u32 *intspec, unsigned int intsize,
                   unsigned int *out_hwirq, unsigned int *out_type);
 void gic_clear_lrs(struct vcpu *v);
 
+struct gic_hw_operations {
+    struct dt_irq * (*get_maintenance_irq)(void);
+    unsigned int (*nr_lines)(void);
+    unsigned int (*nr_lrs)(void);
+    int (*state_init)(struct vcpu *);
+    void (*save_state)(struct vcpu *);
+    void (*restore_state)(struct vcpu *);
+    void (*dump_state)(struct vcpu *);
+    int (*gicv_setup)(struct domain *);
+    void (*enable_irq)(int);
+    void (*disable_irq)(int);
+    void (*eoi_irq)(int);
+    void (*deactivate_irq)(int);
+    unsigned int (*ack_irq)(void);
+    void (*set_irq_property)(unsigned int irq, bool_t level,
+                             const cpumask_t *cpu_mask,
+                             unsigned int priority);
+    void (*send_sgi)(const cpumask_t *, enum gic_sgi);
+    void (*disable_interface)(void);
+    void (*update_lr)(int lr, struct pending_irq *, unsigned int state);
+    void (*update_hcr_status)(uint32_t flag, uint8_t set);
+    void (*clear_lr)(int lr);
+    void (*read_lr)(int lr, struct gic_lr *);
+    void (*write_lr)(int lr, struct gic_lr *);
+    unsigned int (*read_vmcr_priority)(void);
+    void (*secondary_init)(void);
+};
+
+void register_gic_ops(struct gic_hw_operations *ops);
+extern void update_cpu_lr_mask(void);
 
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/xen/include/asm-arm/gic_v2_defs.h b/xen/include/asm-arm/gic_v2_defs.h
index f9ff885..e1bb09c 100644
--- a/xen/include/asm-arm/gic_v2_defs.h
+++ b/xen/include/asm-arm/gic_v2_defs.h
@@ -93,15 +93,6 @@
 #define GICC_IA_CPU_MASK   0x1c00
 #define GICC_IA_CPU_SHIFT  10
 
-#define GICH_HCR_EN        (1 << 0)
-#define GICH_HCR_UIE       (1 << 1)
-#define GICH_HCR_LRENPIE   (1 << 2)
-#define GICH_HCR_NPIE      (1 << 3)
-#define GICH_HCR_VGRP0EIE  (1 << 4)
-#define GICH_HCR_VGRP0DIE  (1 << 5)
-#define GICH_HCR_VGRP1EIE  (1 << 6)
-#define GICH_HCR_VGRP1DIE  (1 << 7)
-
 #define GICH_MISR_EOI      (1 << 0)
 #define GICH_MISR_U        (1 << 1)
 #define GICH_MISR_LRENP    (1 << 2)
@@ -118,9 +109,12 @@
 #define GICH_LR_STATE_MASK      0x3
 #define GICH_LR_STATE_SHIFT     28
 #define GICH_LR_PRIORITY_SHIFT  23
+#define GICH_LR_PRIORITY_MASK   0x1f
+#define GICH_LR_HW_SHIFT        31
+#define GICH_LR_HW_MASK         0x1
+#define GICH_LR_GRP_SHIFT       30
+#define GICH_LR_GRP_MASK        0x1
 #define GICH_LR_MAINTENANCE_IRQ (1<<19)
-#define GICH_LR_PENDING         (1<<28)
-#define GICH_LR_ACTIVE          (1<<29)
 #define GICH_LR_GRP1            (1<<30)
 #define GICH_LR_HW              (1<<31)
 #define GICH_LR_CPUID_SHIFT     9
-- 
1.7.9.5
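
Illustration (not part of the patch): the stand-alone sketch below shows the
hardware-operations dispatch pattern this patch introduces -- a driver fills
in a table of function pointers, registers it once at init time, and generic
code only ever calls through the registered table. All names here
(hw_operations, hw_ops, register_hw_ops, dummy_*) are hypothetical and chosen
for the example only; they are not part of the Xen tree.

/* Minimal sketch of the ops-table/callback-registration pattern. */
#include <stdio.h>

struct hw_operations {
    unsigned int (*nr_lines)(void);   /* query a hardware property */
    void (*enable_irq)(int irq);      /* low level hardware action */
};

/* Generic code dispatches through this pointer only. */
static struct hw_operations *hw_ops;

static void register_hw_ops(struct hw_operations *ops)
{
    hw_ops = ops;
}

/* A dummy "driver" supplying the low level implementation. */
static unsigned int dummy_nr_lines(void)
{
    return 96;
}

static void dummy_enable_irq(int irq)
{
    printf("enable irq %d\n", irq);
}

static struct hw_operations dummy_ops = {
    .nr_lines   = dummy_nr_lines,
    .enable_irq = dummy_enable_irq,
};

int main(void)
{
    register_hw_ops(&dummy_ops);                  /* driver init */
    printf("lines: %u\n", hw_ops->nr_lines());    /* generic code path */
    hw_ops->enable_irq(34);
    return 0;
}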