The decision about which CPUs an interrupt is effectively routed to is made in
the various apic->cpu_mask_to_apicid() implementations. To support effective
affinity masks, this information needs to be updated in irq_data. Add a pointer
to irq_data to the callbacks and feed it through the call chain.

Signed-off-by: Thomas Gleixner
---
 arch/x86/include/asm/apic.h           |  5 +++++
 arch/x86/kernel/apic/apic.c           |  9 +++++++--
 arch/x86/kernel/apic/vector.c         | 25 +++++++++++++++----------
 arch/x86/kernel/apic/x2apic_cluster.c |  3 ++-
 arch/x86/kernel/apic/x2apic_uv_x.c    |  5 +++--
 5 files changed, 32 insertions(+), 15 deletions(-)

--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -252,6 +252,8 @@ static inline int x2apic_enabled(void) {
 #define x2apic_supported()	(0)
 #endif /* !CONFIG_X86_X2APIC */
 
+struct irq_data;
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -297,6 +299,7 @@ struct apic {
 	unsigned long (*set_apic_id)(unsigned int id);
 
 	int (*cpu_mask_to_apicid)(const struct cpumask *cpumask,
+				  struct irq_data *irqdata,
 				  unsigned int *apicid);
 
 	/* ipi */
@@ -540,8 +543,10 @@ static inline int default_phys_pkg_id(in
 #endif
 
 extern int flat_cpu_mask_to_apicid(const struct cpumask *cpumask,
+				   struct irq_data *irqdata,
 				   unsigned int *apicid);
 extern int default_cpu_mask_to_apicid(const struct cpumask *cpumask,
+				      struct irq_data *irqdata,
 				      unsigned int *apicid);
 
 static inline void
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2201,7 +2201,9 @@ void default_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
-int default_cpu_mask_to_apicid(const struct cpumask *mask, unsigned int *apicid)
+int default_cpu_mask_to_apicid(const struct cpumask *mask,
+			       struct irq_data *irqdata,
+			       unsigned int *apicid)
 {
 	unsigned int cpu = cpumask_first(mask);
 
@@ -2211,7 +2213,10 @@ int default_cpu_mask_to_apicid(const str
 	return 0;
 }
 
-int flat_cpu_mask_to_apicid(const struct cpumask *mask, unsigned int *apicid)
+int flat_cpu_mask_to_apicid(const struct cpumask *mask,
+			    struct irq_data *irqdata,
+			    unsigned int *apicid)
+
 {
 	unsigned long cpu_mask = cpumask_bits(mask)[0] & APIC_ALL_CPUS;
 
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -103,7 +103,8 @@ static void free_apic_chip_data(struct a
 }
 
 static int __assign_irq_vector(int irq, struct apic_chip_data *d,
-			       const struct cpumask *mask)
+			       const struct cpumask *mask,
+			       struct irq_data *irqdata)
 {
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -226,32 +227,35 @@ static int __assign_irq_vector(int irq,
 	 * cpus masked out.
	 */
 	cpumask_and(vector_searchmask, vector_searchmask, mask);
-	BUG_ON(apic->cpu_mask_to_apicid(vector_searchmask, &d->cfg.dest_apicid));
+	BUG_ON(apic->cpu_mask_to_apicid(vector_searchmask, irqdata,
+					&d->cfg.dest_apicid));
 	return 0;
 }
 
 static int assign_irq_vector(int irq, struct apic_chip_data *data,
-			     const struct cpumask *mask)
+			     const struct cpumask *mask,
+			     struct irq_data *irqdata)
 {
 	int err;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	err = __assign_irq_vector(irq, data, mask);
+	err = __assign_irq_vector(irq, data, mask, irqdata);
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
 	return err;
 }
 
 static int assign_irq_vector_policy(int irq, int node,
 				    struct apic_chip_data *data,
-				    struct irq_alloc_info *info)
+				    struct irq_alloc_info *info,
+				    struct irq_data *irqdata)
 {
 	if (info && info->mask)
-		return assign_irq_vector(irq, data, info->mask);
+		return assign_irq_vector(irq, data, info->mask, irqdata);
 	if (node != NUMA_NO_NODE &&
-	    assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
+	    assign_irq_vector(irq, data, cpumask_of_node(node), irqdata) == 0)
 		return 0;
-	return assign_irq_vector(irq, data, apic->target_cpus());
+	return assign_irq_vector(irq, data, apic->target_cpus(), irqdata);
 }
 
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
@@ -363,7 +367,8 @@ static int x86_vector_alloc_irqs(struct
 		irq_data->chip = &lapic_controller;
 		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector_policy(virq + i, node, data, info);
+		err = assign_irq_vector_policy(virq + i, node, data, info,
+					       irq_data);
 		if (err)
 			goto error;
 	}
@@ -537,7 +542,7 @@ static int apic_set_affinity(struct irq_
 	if (!cpumask_intersects(dest, cpu_online_mask))
 		return -EINVAL;
 
-	err = assign_irq_vector(irq, data, dest);
+	err = assign_irq_vector(irq, data, dest, irq_data);
 	return err ? err : IRQ_SET_MASK_OK;
 }
 
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -104,7 +104,8 @@ static void x2apic_send_IPI_all(int vect
 }
 
 static int
-x2apic_cpu_mask_to_apicid(const struct cpumask *mask, unsigned int *apicid)
+x2apic_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
+			  unsigned int *apicid)
 {
 	unsigned int cpu;
 	u32 dest = 0;
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -526,9 +526,10 @@ static void uv_init_apic_ldr(void)
 }
 
 static int
-uv_cpu_mask_to_apicid(const struct cpumask *mask, unsigned int *apicid)
+uv_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
+		      unsigned int *apicid)
 {
-	int ret = default_cpu_mask_to_apicid(mask, apicid);
+	int ret = default_cpu_mask_to_apicid(mask, irqdata, apicid);
 
 	if (!ret)
 		*apicid |= uv_apicid_hibits;
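
For illustration only, not part of this patch: once the irqdata pointer reaches
the callbacks, a cpu_mask_to_apicid() style implementation can record the CPU it
actually picked as the effective affinity of the interrupt. A minimal sketch,
modelled on the default implementation; example_cpu_mask_to_apicid() is a made-up
name and irq_data_update_effective_affinity() is assumed to be provided by the
generic effective-affinity infrastructure rather than by this patch.

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <asm/smp.h>

/* Sketch: report the APIC ID of the first CPU in the mask and record
 * that CPU as the effective affinity in irq_data. */
static int example_cpu_mask_to_apicid(const struct cpumask *mask,
				      struct irq_data *irqdata,
				      unsigned int *apicid)
{
	unsigned int cpu = cpumask_first(mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	*apicid = per_cpu(x86_cpu_to_apicid, cpu);
	/* Assumed helper; a no-op stub when effective affinity is not enabled. */
	irq_data_update_effective_affinity(irqdata, cpumask_of(cpu));
	return 0;
}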