x86_32 only provides a special way to obtain the local per-cpu area offset,
via x86_read_percpu(). Otherwise it can fully use the generic handling.

Cc: tglx@linutronix.de
Cc: mingo@redhat.com
Cc: ak@suse.de
Signed-off-by: Christoph Lameter
Signed-off-by: Mike Travis
---
 include/asm-x86/percpu_32.h |   30 +++++++++---------------------
 1 file changed, 9 insertions(+), 21 deletions(-)

--- a/include/asm-x86/percpu_32.h
+++ b/include/asm-x86/percpu_32.h
@@ -42,34 +42,22 @@
  */
 #ifdef CONFIG_SMP
-/* This is used for other cpus to find our section. */
-extern unsigned long __per_cpu_offset[];
-
-#define per_cpu_offset(x) (__per_cpu_offset[x])
-
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
-/* We can use this directly for local CPU (faster). */
-DECLARE_PER_CPU(unsigned long, this_cpu_off);
-
-/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) (*({				\
-	extern int simple_indentifier_##var(void);	\
-	RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
-
-#define __raw_get_cpu_var(var) (*({					\
-	extern int simple_indentifier_##var(void);			\
-	RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off));	\
-}))
-
-#define __get_cpu_var(var) __raw_get_cpu_var(var)
+#define __my_cpu_offset x86_read_percpu(this_cpu_off)
 
 /* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
 #define __percpu_seg "%%fs:"
+
 #else /* !SMP */
-#include <asm-generic/percpu.h>
+
 #define __percpu_seg ""
+
 #endif /* SMP */
 
+#include <asm-generic/percpu.h>
+
+/* We can use this directly for local CPU (faster). */
+DECLARE_PER_CPU(unsigned long, this_cpu_off);
+
 /* For arch-specific code, we can use direct single-insn ops (they
  * don't give an lvalue though). */
 extern void __bad_percpu_size(void);
--
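
For reference (not part of the change itself): once an arch defines
__my_cpu_offset, the generic handling in asm-generic/percpu.h supplies the
per_cpu()/__get_cpu_var() accessors. A simplified sketch of how that header
is assumed to consume the override, abbreviated for illustration:

	#ifdef CONFIG_SMP

	extern unsigned long __per_cpu_offset[NR_CPUS];
	#define per_cpu_offset(x)	(__per_cpu_offset[x])

	/* Arch may override this; x86_32 now does so via
	 * x86_read_percpu(this_cpu_off). */
	#ifndef __my_cpu_offset
	#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
	#endif

	/* Add an offset to a per-cpu symbol, keeping the pointer type. */
	#define SHIFT_PERCPU_PTR(ptr, offset)	RELOC_HIDE((ptr), (offset))

	#define per_cpu(var, cpu) \
		(*SHIFT_PERCPU_PTR(&per_cpu__##var, per_cpu_offset(cpu)))
	#define __raw_get_cpu_var(var) \
		(*SHIFT_PERCPU_PTR(&per_cpu__##var, __my_cpu_offset))
	#define __get_cpu_var(var)	__raw_get_cpu_var(var)

	#endif /* CONFIG_SMP */

So x86_32 keeps only its fast fs-segment read of this_cpu_off and drops its
private copies of the accessor macros.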