diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 801c6f040faa..eb8939d28f96 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1510,6 +1510,12 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
 	&init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
+/*
+ * The initial hardirq_stack_ptr value of NULL is invalid. To prevent it
+ * from being used if an IRQ happens too early, we initialize irq_count to 1,
+ * which effectively disables ENTER_IRQ_STACK. The code that maps the IRQ
+ * stack will reset irq_count to -1.
+ */
 DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
 
 DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 48caa3d31662..61c691889362 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -56,6 +56,7 @@ static int map_irq_stack(unsigned int cpu)
 		return -ENOMEM;
 
 	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
+	pr_err("*** CPU %u: hardirq_stack_ptr = 0x%lx\n", cpu, (unsigned long)(va + IRQ_STACK_SIZE));
 	return 0;
 }
 #else
@@ -74,7 +75,14 @@
 
 int irq_init_percpu_irqstack(unsigned int cpu)
 {
+	int ret;
+
 	if (per_cpu(hardirq_stack_ptr, cpu))
 		return 0;
-	return map_irq_stack(cpu);
+	ret = map_irq_stack(cpu);
+	if (ret)
+		return ret;
+
+	per_cpu(irq_count, cpu) = -1;
+	return 0;
 }
diff --git a/mm/slab.c b/mm/slab.c
index 329bfe67f2ca..198e9948a874 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1481,6 +1481,7 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 	*addr++ = caller;
 	*addr++ = smp_processor_id();
 	size -= 3 * sizeof(unsigned long);
+/*
 	{
 		unsigned long *sptr = &caller;
 		unsigned long svalue;
@@ -1496,6 +1497,7 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 		}
 
 	}
+*/
 	*addr++ = 0x87654321;
 }
 
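
For reference, the gate that the common.c comment describes works like this: ENTER_IRQ_STACK (arch/x86/entry/entry_64.S) increments irq_count and switches %rsp to hardirq_stack_ptr only when the result is zero, i.e. when the counter was -1; LEAVE_IRQ_STACK decrements it again. Below is a minimal user-space sketch of that initialize-to-1/reset-to--1 protocol, assuming the scheme exactly as the comment states it. The function names, the int counter (the kernel uses an unsigned per-CPU variable and asm, not C functions), and the printf reporting are hypothetical stand-ins for illustration, not kernel code.

#include <stdio.h>

static int irq_count = 1;		/* patched initial value: gate closed */
static char *hardirq_stack_ptr;		/* NULL until the stack is mapped */
static char irq_stack[16384];

/* Model of ENTER_IRQ_STACK: the stack switch happens only when the
 * increment takes irq_count from -1 to 0. */
static const char *enter_irq_stack(void)
{
	if (++irq_count != 0)
		return "gate closed (nested or not armed), stay on current stack";
	return hardirq_stack_ptr ? "switched to mapped IRQ stack"
				 : "would load a NULL stack pointer -> crash";
}

/* Model of LEAVE_IRQ_STACK: undo the increment on the way out. */
static void leave_irq_stack(void)
{
	irq_count--;
}

int main(void)
{
	/* An IRQ arriving before irq_init_percpu_irqstack() is harmless:
	 * irq_count goes 1 -> 2, never 0, so no stack switch is attempted. */
	printf("early IRQ: %s\n", enter_irq_stack());
	leave_irq_stack();

	/* What the patched irq_init_percpu_irqstack() does: map the stack
	 * first, then arm the gate by resetting irq_count to -1. */
	hardirq_stack_ptr = irq_stack + sizeof(irq_stack);
	irq_count = -1;

	printf("late IRQ:  %s\n", enter_irq_stack());
	leave_irq_stack();
	return 0;
}

The ordering in irq_init_percpu_irqstack() is the point of the patch: the stack is mapped before the gate is armed, so there is no window in which irq_count is -1 while hardirq_stack_ptr is still NULL.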