From: Daniel Borkmann <daniel@iogearbox.net>
Subject: [PATCH bpf-next 1/3] tools: add smp_* barrier variants to include infrastructure
Date: Wed, 17 Oct 2018 16:41:54 +0200
Message-ID: <20181017144156.16639-2-daniel@iogearbox.net>
References: <20181017144156.16639-1-daniel@iogearbox.net>
In-Reply-To: <20181017144156.16639-1-daniel@iogearbox.net>
To: alexei.starovoitov@gmail.com
Cc: peterz@infradead.org, paulmck@linux.vnet.ibm.com, will.deacon@arm.com,
    acme@redhat.com, yhs@fb.com, john.fastabend@gmail.com,
    netdev@vger.kernel.org, Daniel Borkmann <daniel@iogearbox.net>

Add the definitions of smp_rmb(), smp_wmb(), and smp_mb() to the tools
include infrastructure. This patch adds the implementations for x86-64
and arm64, and falls back to the plain mb()/rmb()/wmb() variants for
other archs that do not have them implemented yet, so that support can
be added successively by those with access to test machines. The x86-64
smp_mb() uses a lock + add combination on an address below the red zone.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
---
 tools/arch/arm64/include/asm/barrier.h | 10 ++++++++++
 tools/arch/x86/include/asm/barrier.h   |  9 ++++++---
 tools/include/asm/barrier.h            | 11 +++++++++++
 3 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/tools/arch/arm64/include/asm/barrier.h b/tools/arch/arm64/include/asm/barrier.h
index 40bde6b..acf1f06 100644
--- a/tools/arch/arm64/include/asm/barrier.h
+++ b/tools/arch/arm64/include/asm/barrier.h
@@ -14,4 +14,14 @@
 #define wmb() asm volatile("dmb ishst" ::: "memory")
 #define rmb() asm volatile("dmb ishld" ::: "memory")
 
+/*
+ * Kernel uses dmb variants on arm64 for smp_*() barriers. Pretty much the same
+ * implementation as above mb()/wmb()/rmb(), though for the latter kernel uses
+ * dsb. In any case, should above mb()/wmb()/rmb() change, make sure the below
+ * smp_*() don't.
+ */
+#define smp_mb() asm volatile("dmb ish" ::: "memory")
+#define smp_wmb() asm volatile("dmb ishst" ::: "memory")
+#define smp_rmb() asm volatile("dmb ishld" ::: "memory")
+
 #endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */

diff --git a/tools/arch/x86/include/asm/barrier.h b/tools/arch/x86/include/asm/barrier.h
index 8774dee..c97c0c5 100644
--- a/tools/arch/x86/include/asm/barrier.h
+++ b/tools/arch/x86/include/asm/barrier.h
@@ -21,9 +21,12 @@
 #define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #elif defined(__x86_64__)
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
+#define mb() asm volatile("mfence" ::: "memory")
+#define rmb() asm volatile("lfence" ::: "memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
 #endif
 
 #endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */

diff --git a/tools/include/asm/barrier.h b/tools/include/asm/barrier.h
index 391d942..e4c8845 100644
--- a/tools/include/asm/barrier.h
+++ b/tools/include/asm/barrier.h
@@ -1,4 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/compiler.h>
 #if defined(__i386__) || defined(__x86_64__)
 #include "../../arch/x86/include/asm/barrier.h"
 #elif defined(__arm__)
@@ -26,3 +27,13 @@
 #else
 #include <asm-generic/barrier.h>
 #endif
+/* Fallback definitions for archs that haven't been updated yet. */
+#ifndef smp_rmb
+# define smp_rmb() rmb()
+#endif
+#ifndef smp_wmb
+# define smp_wmb() wmb()
+#endif
+#ifndef smp_mb
+# define smp_mb() mb()
+#endif
-- 
2.9.5
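
[Editor's note, not part of the patch: below is a minimal stand-alone sketch of
the producer/consumer pairing that smp_wmb()/smp_rmb() are intended for, e.g. a
tool consuming a shared ring buffer. All names (ring, prod_head, cons_tail) are
made up for illustration, and the smp_* macros are stubbed with plain compiler
barriers so the example compiles outside the tools tree; a tool built against
tools/include would instead pick up the real definitions added above via
<asm/barrier.h>.]

/*
 * Illustrative sketch only. Shows the store/load pairing that
 * smp_wmb()/smp_rmb() provide: the producer orders its data store
 * before the index store, the consumer orders the index load before
 * the data load.
 */
#include <stdio.h>

#ifndef smp_wmb
# define smp_wmb() __asm__ __volatile__("" ::: "memory")
#endif
#ifndef smp_rmb
# define smp_rmb() __asm__ __volatile__("" ::: "memory")
#endif

#define RING_SIZE 16

static unsigned int ring[RING_SIZE];
static volatile unsigned int prod_head; /* bumped by the producer   */
static unsigned int cons_tail;          /* private to the consumer  */

/* Producer: write the data, then publish it by bumping the index. */
static void produce(unsigned int val)
{
	ring[prod_head % RING_SIZE] = val;
	smp_wmb();	/* order the data store before the index store */
	prod_head++;
}

/* Consumer: read the index, then read the data it covers. */
static int consume(unsigned int *val)
{
	if (cons_tail == prod_head)
		return 0;	/* nothing new */
	smp_rmb();	/* order the index load before the data load */
	*val = ring[cons_tail % RING_SIZE];
	cons_tail++;
	return 1;
}

int main(void)
{
	unsigned int v;

	produce(42);
	if (consume(&v))
		printf("got %u\n", v);
	return 0;
}

[On the x86-64 smp_mb() choice mentioned in the commit message: the locked add
targets -132(%%rsp), i.e. an address below the 128-byte red zone that leaf
functions may use, so the dummy add cannot clobber live stack data while still
acting as a full barrier.]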